// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_register.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	int ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	int ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					   sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_dma_mem *bi;
	int ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					   sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static int i40e_config_asq_regs(struct i40e_hw *hw)
{
	int ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
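	/* Editorial note: the length register carries both the ring size
	 * (low bits) and the queue-enable flag in one write, so e.g. a
	 * 128-entry ring would be programmed as
	 * (128 | I40E_PF_ATQLEN_ATQENABLE_MASK).
	 */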
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = -EIO;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static int i40e_config_arq_regs(struct i40e_hw *hw)
{
	int ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = -EIO;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static int i40e_init_asq(struct i40e_hw *hw)
{
	int ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = -EIO;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static int i40e_init_arq(struct i40e_hw *hw)
{
	int ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = -EIO;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static int i40e_shutdown_asq(struct i40e_hw *hw)
{
	int ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = -EBUSY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static int i40e_shutdown_arq(struct i40e_hw *hw)
{
	int ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = -EBUSY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 * i40e_set_hw_flags - set HW flags
 * @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		fallthrough;
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
int i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;
	int ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = -EIO;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != -EIO)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != 0)
		goto init_adminq_free_arq;

	/* Some features were introduced in different FW API version
	 * for different MAC type.
	 */
	i40e_set_hw_flags(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}
	if (hw->mac.type == I40E_MAC_X722 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 &&
	     hw->aq.api_min_ver >= 7))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = -EIO;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
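
/* Illustrative bring-up sketch (editorial example with hypothetical sizing;
 * the calling driver picks its own defaults before i40e_init_adminq()):
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	err = i40e_init_adminq(hw);
 */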

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40e_asq_send_command_atomic_exec - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 * @is_atomic_context: is the function called in an atomic context?
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details,
				  bool is_atomic_context)
{
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	int status = 0;
	u32 val = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = -EIO;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = -ENOSPC;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = -EINVAL;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = -EINVAL;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = -ENOSPC;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;

			if (is_atomic_context)
				udelay(50);
			else
				usleep_range(40, 60);

			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = -EBUSY;
		else
			status = -EIO;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = -EIO;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = -EIO;
		}
	}

asq_send_command_error:
	return status;
}

/**
 * i40e_asq_send_command_atomic - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 * @is_atomic_context: is the function called in an atomic context?
 *
 * Acquires the lock and calls the main send command execution
 * routine.
 **/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
			     struct i40e_aq_desc *desc,
			     void *buff, /* can be NULL */
			     u16 buff_size,
			     struct i40e_asq_cmd_details *cmd_details,
			     bool is_atomic_context)
{
	int status;

	mutex_lock(&hw->aq.asq_mutex);
	status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
						   cmd_details,
						   is_atomic_context);

	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

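/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * Convenience wrapper around i40e_asq_send_command_atomic() for callers
 * in non-atomic context, where sleeping waits are allowed.
 **/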
int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
		      void *buff, /* can be NULL */ u16 buff_size,
		      struct i40e_asq_cmd_details *cmd_details)
{
	return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
					    cmd_details, false);
}

/**
 * i40e_asq_send_command_atomic_v2 - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 * @is_atomic_context: is the function called in an atomic context?
 * @aq_status: pointer to Admin Queue status return value
 *
 * Acquires the lock and calls the main send command execution
 * routine. Returns the last Admin Queue status in aq_status
 * to avoid race conditions in access to hw->aq.asq_last_status.
 **/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16 buff_size,
				struct i40e_asq_cmd_details *cmd_details,
				bool is_atomic_context,
				enum i40e_admin_queue_err *aq_status)
{
	int status;

	mutex_lock(&hw->aq.asq_mutex);
	status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
						   buff_size,
						   cmd_details,
						   is_atomic_context);
	if (aq_status)
		*aq_status = hw->aq.asq_last_status;
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

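/**
 * i40e_asq_send_command_v2 - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 * @aq_status: pointer to Admin Queue status return value
 *
 * Non-atomic wrapper around i40e_asq_send_command_atomic_v2() that also
 * reports the last Admin Queue status through @aq_status.
 **/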
int
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
			 void *buff, /* can be NULL */ u16 buff_size,
			 struct i40e_asq_cmd_details *cmd_details,
			 enum i40e_admin_queue_err *aq_status)
{
	return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
					       cmd_details, false, aq_status);
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
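
/* Illustrative direct-command sketch (editorial example, no indirect
 * buffer), assuming the i40e_aqc_opc_queue_shutdown opcode from
 * i40e_adminq_cmd.h:
 *
 *	struct i40e_aq_desc desc;
 *	int status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */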

/**
 * i40e_clean_arq_element - clean one element from the receive queue
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
int i40e_clean_arq_element(struct i40e_hw *hw,
			   struct i40e_arq_event_info *e,
			   u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = -EIO;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = -EIO;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

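/* Illustrative receive-side polling sketch (editorial example, simplified;
 * real callers also allocate e.msg_buf and dispatch on the event opcode):
 *
 *	struct i40e_arq_event_info event = {};
 *	u16 pending = 0;
 *	int ret;
 *
 *	do {
 *		ret = i40e_clean_arq_element(hw, &event, &pending);
 *	} while (!ret && pending);
 */

/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/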
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
