1 | /* bnx2x_cmn.c: QLogic Everest network driver. |
2 | * |
3 | * Copyright (c) 2007-2013 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation |
5 | * All rights reserved |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation. |
10 | * |
11 | * Maintained by: Ariel Elior <ariel.elior@qlogic.com> |
12 | * Written by: Eliezer Tamir |
13 | * Based on code from Michael Chan's bnx2 driver |
14 | * UDP CSUM errata workaround by Arik Gendelman |
15 | * Slowpath and fastpath rework by Vladislav Zolotarov |
16 | * Statistics and Link management by Yitchak Gertner |
17 | * |
18 | */ |
19 | |
20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
21 | |
22 | #include <linux/etherdevice.h> |
23 | #include <linux/if_vlan.h> |
24 | #include <linux/interrupt.h> |
25 | #include <linux/ip.h> |
26 | #include <linux/crash_dump.h> |
27 | #include <net/tcp.h> |
28 | #include <net/gro.h> |
29 | #include <net/ipv6.h> |
30 | #include <net/ip6_checksum.h> |
31 | #include <linux/prefetch.h> |
32 | #include "bnx2x_cmn.h" |
33 | #include "bnx2x_init.h" |
34 | #include "bnx2x_sp.h" |
35 | |
36 | static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); |
37 | static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); |
38 | static int bnx2x_alloc_fp_mem(struct bnx2x *bp); |
39 | static int bnx2x_poll(struct napi_struct *napi, int budget); |
40 | |
41 | static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) |
42 | { |
43 | int i; |
44 | |
45 | /* Add NAPI objects */ |
46 | for_each_rx_queue_cnic(bp, i) { |
47 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); |
48 | } |
49 | } |
50 | |
51 | static void bnx2x_add_all_napi(struct bnx2x *bp) |
52 | { |
53 | int i; |
54 | |
55 | /* Add NAPI objects */ |
56 | for_each_eth_queue(bp, i) { |
57 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); |
58 | } |
59 | } |
60 | |
61 | static int bnx2x_calc_num_queues(struct bnx2x *bp) |
62 | { |
63 | int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); |
64 | |
65 | /* Reduce memory usage in kdump environment by using only one queue */ |
66 | if (is_kdump_kernel()) |
67 | nq = 1; |
68 | |
69 | nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); |
70 | return nq; |
71 | } |
72 | |
73 | /** |
74 | * bnx2x_move_fp - move content of the fastpath structure. |
75 | * |
76 | * @bp: driver handle |
77 | * @from: source FP index |
78 | * @to: destination FP index |
79 | * |
80 | * Makes sure the content of the bp->fp[to].napi is kept |
81 | * intact. This is done by first copying the napi struct from |
82 | * the target to the source, and then mem copying the entire |
83 | * source onto the target. Update txdata pointers and related |
84 | * content. |
85 | */ |
86 | static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) |
87 | { |
88 | struct bnx2x_fastpath *from_fp = &bp->fp[from]; |
89 | struct bnx2x_fastpath *to_fp = &bp->fp[to]; |
90 | struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; |
91 | struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; |
92 | struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; |
93 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; |
94 | int old_max_eth_txqs, new_max_eth_txqs; |
95 | int old_txdata_index = 0, new_txdata_index = 0; |
96 | struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; |
97 | |
98 | /* Copy the NAPI object as it has been already initialized */ |
99 | from_fp->napi = to_fp->napi; |
100 | |
101 | /* Move bnx2x_fastpath contents */ |
102 | memcpy(to_fp, from_fp, sizeof(*to_fp)); |
103 | to_fp->index = to; |
104 | |
105 | /* Retain the tpa_info of the original `to' version as we don't want |
106 | * 2 FPs to contain the same tpa_info pointer. |
107 | */ |
108 | to_fp->tpa_info = old_tpa_info; |
109 | |
110 | /* move sp_objs contents as well, as their indices match fp ones */ |
111 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); |
112 | |
113 | /* move fp_stats contents as well, as their indices match fp ones */ |
114 | memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats)); |
115 | |
116 | /* Update txdata pointers in fp and move txdata content accordingly: |
117 | * Each fp consumes 'max_cos' txdata structures, so the index should be |
118 | * decremented by max_cos x delta. |
119 | */ |
120 | |
121 | old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; |
122 | new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * |
123 | (bp)->max_cos; |
124 | if (from == FCOE_IDX(bp)) { |
125 | old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; |
126 | new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; |
127 | } |
128 | |
129 | memcpy(&bp->bnx2x_txq[new_txdata_index], |
130 | &bp->bnx2x_txq[old_txdata_index], |
131 | sizeof(struct bnx2x_fp_txdata)); |
132 | to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; |
133 | } |
134 | |
135 | /** |
136 | * bnx2x_fill_fw_str - Fill buffer with FW version string. |
137 | * |
138 | * @bp: driver handle |
139 | * @buf: character buffer to fill with the fw name |
140 | * @buf_len: length of the above buffer |
141 | * |
142 | */ |
143 | void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) |
144 | { |
145 | if (IS_PF(bp)) { |
146 | u8 phy_fw_ver[PHY_FW_VER_LEN]; |
147 | |
148 | phy_fw_ver[0] = '\0'; |
149 | bnx2x_get_ext_phy_fw_version(&bp->link_params, |
150 | phy_fw_ver, PHY_FW_VER_LEN); |
151 | strscpy(buf, bp->fw_ver, buf_len); |
152 | snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), |
153 | "bc %d.%d.%d%s%s", |
154 | (bp->common.bc_ver & 0xff0000) >> 16, |
155 | (bp->common.bc_ver & 0xff00) >> 8, |
156 | (bp->common.bc_ver & 0xff), |
157 | ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); |
158 | } else { |
159 | bnx2x_vf_fill_fw_str(bp, buf, buf_len); |
160 | } |
161 | } |
162 | |
163 | /** |
164 | * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact |
165 | * |
166 | * @bp: driver handle |
167 | * @delta: number of eth queues which were not allocated |
168 | */ |
169 | static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) |
170 | { |
171 | int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); |
172 | |
173 | /* Queue pointer cannot be re-set on an fp-basis, as moving pointer |
174 | * backward along the array could cause memory to be overridden |
175 | */ |
176 | for (cos = 1; cos < bp->max_cos; cos++) { |
177 | for (i = 0; i < old_eth_num - delta; i++) { |
178 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
179 | int new_idx = cos * (old_eth_num - delta) + i; |
180 | |
181 | memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], |
182 | sizeof(struct bnx2x_fp_txdata)); |
183 | fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; |
184 | } |
185 | } |
186 | } |
187 | |
188 | int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ |
189 | |
190 | /* free skb in the packet ring at pos idx |
191 | * return idx of last bd freed |
192 | */ |
193 | static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, |
194 | u16 idx, unsigned int *pkts_compl, |
195 | unsigned int *bytes_compl) |
196 | { |
197 | struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; |
198 | struct eth_tx_start_bd *tx_start_bd; |
199 | struct eth_tx_bd *tx_data_bd; |
200 | struct sk_buff *skb = tx_buf->skb; |
201 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
202 | int nbd; |
203 | u16 split_bd_len = 0; |
204 | |
205 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ |
206 | prefetch(&skb->end); |
207 | |
208 | DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", |
209 | txdata->txq_index, idx, tx_buf, skb); |
210 | |
211 | tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; |
212 | |
213 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; |
214 | #ifdef BNX2X_STOP_ON_ERROR |
215 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { |
216 | BNX2X_ERR("BAD nbd!\n"); |
217 | bnx2x_panic(); |
218 | } |
219 | #endif |
220 | new_cons = nbd + tx_buf->first_bd; |
221 | |
222 | /* Get the next bd */ |
223 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
224 | |
225 | /* Skip a parse bd... */ |
226 | --nbd; |
227 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
228 | |
229 | if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { |
230 | /* Skip second parse bd... */ |
231 | --nbd; |
232 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
233 | } |
234 | |
235 | /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ |
236 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { |
237 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; |
238 | split_bd_len = BD_UNMAP_LEN(tx_data_bd); |
239 | --nbd; |
240 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
241 | } |
242 | |
243 | /* unmap first bd */ |
244 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), |
245 | BD_UNMAP_LEN(tx_start_bd) + split_bd_len, |
246 | DMA_TO_DEVICE); |
247 | |
248 | /* now free frags */ |
249 | while (nbd > 0) { |
250 | |
251 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; |
252 | dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), |
253 | BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); |
254 | if (--nbd) |
255 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
256 | } |
257 | |
258 | /* release skb */ |
259 | WARN_ON(!skb); |
260 | if (likely(skb)) { |
261 | (*pkts_compl)++; |
262 | (*bytes_compl) += skb->len; |
263 | dev_kfree_skb_any(skb); |
264 | } |
265 | |
266 | tx_buf->first_bd = 0; |
267 | tx_buf->skb = NULL; |
268 | |
269 | return new_cons; |
270 | } |
271 | |
272 | int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) |
273 | { |
274 | struct netdev_queue *txq; |
275 | u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; |
276 | unsigned int pkts_compl = 0, bytes_compl = 0; |
277 | |
278 | #ifdef BNX2X_STOP_ON_ERROR |
279 | if (unlikely(bp->panic)) |
280 | return -1; |
281 | #endif |
282 | |
283 | txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); |
284 | hw_cons = le16_to_cpu(*txdata->tx_cons_sb); |
285 | sw_cons = txdata->tx_pkt_cons; |
286 | |
287 | /* Ensure subsequent loads occur after hw_cons */ |
288 | smp_rmb(); |
289 | |
290 | while (sw_cons != hw_cons) { |
291 | u16 pkt_cons; |
292 | |
293 | pkt_cons = TX_BD(sw_cons); |
294 | |
295 | DP(NETIF_MSG_TX_DONE, |
296 | "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n" , |
297 | txdata->txq_index, hw_cons, sw_cons, pkt_cons); |
298 | |
299 | bd_cons = bnx2x_free_tx_pkt(bp, txdata, idx: pkt_cons, |
300 | pkts_compl: &pkts_compl, bytes_compl: &bytes_compl); |
301 | |
302 | sw_cons++; |
303 | } |
304 | |
305 | netdev_tx_completed_queue(dev_queue: txq, pkts: pkts_compl, bytes: bytes_compl); |
306 | |
307 | txdata->tx_pkt_cons = sw_cons; |
308 | txdata->tx_bd_cons = bd_cons; |
309 | |
310 | /* Need to make the tx_bd_cons update visible to start_xmit() |
311 | * before checking for netif_tx_queue_stopped(). Without the |
312 | * memory barrier, there is a small possibility that |
313 | * start_xmit() will miss it and cause the queue to be stopped |
314 | * forever. |
315 | * On the other hand we need an rmb() here to ensure the proper |
316 | * ordering of bit testing in the following |
317 | * netif_tx_queue_stopped(txq) call. |
318 | */ |
319 | smp_mb(); |
320 | |
321 | if (unlikely(netif_tx_queue_stopped(txq))) { |
322 | /* Taking tx_lock() is needed to prevent re-enabling the queue |
323 | * while it's empty. This could have happened if rx_action() gets |
324 | * suspended in bnx2x_tx_int() after the condition before |
325 | * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): |
326 | * |
327 | * stops the queue->sees fresh tx_bd_cons->releases the queue-> |
328 | * sends some packets consuming the whole queue again-> |
329 | * stops the queue |
330 | */ |
331 | |
332 | __netif_tx_lock(txq, smp_processor_id()); |
333 | |
334 | if ((netif_tx_queue_stopped(txq)) && |
335 | (bp->state == BNX2X_STATE_OPEN) && |
336 | (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) |
337 | netif_tx_wake_queue(txq); |
338 | |
339 | __netif_tx_unlock(txq); |
340 | } |
341 | return 0; |
342 | } |
343 | |
344 | static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, |
345 | u16 idx) |
346 | { |
347 | u16 last_max = fp->last_max_sge; |
348 | |
349 | if (SUB_S16(idx, last_max) > 0) |
350 | fp->last_max_sge = idx; |
351 | } |
352 | |
353 | static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, |
354 | u16 sge_len, |
355 | struct eth_end_agg_rx_cqe *cqe) |
356 | { |
357 | struct bnx2x *bp = fp->bp; |
358 | u16 last_max, last_elem, first_elem; |
359 | u16 delta = 0; |
360 | u16 i; |
361 | |
362 | if (!sge_len) |
363 | return; |
364 | |
365 | /* First mark all used pages */ |
366 | for (i = 0; i < sge_len; i++) |
367 | BIT_VEC64_CLEAR_BIT(fp->sge_mask, |
368 | RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); |
369 | |
370 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n" , |
371 | sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); |
372 | |
373 | /* Here we assume that the last SGE index is the biggest */ |
374 | prefetch((void *)(fp->sge_mask)); |
375 | bnx2x_update_last_max_sge(fp, |
376 | le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); |
377 | |
378 | last_max = RX_SGE(fp->last_max_sge); |
379 | last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; |
380 | first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; |
381 | |
382 | /* If ring is not full */ |
383 | if (last_elem + 1 != first_elem) |
384 | last_elem++; |
385 | |
386 | /* Now update the prod */ |
387 | for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { |
388 | if (likely(fp->sge_mask[i])) |
389 | break; |
390 | |
391 | fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; |
392 | delta += BIT_VEC64_ELEM_SZ; |
393 | } |
394 | |
395 | if (delta > 0) { |
396 | fp->rx_sge_prod += delta; |
397 | /* clear page-end entries */ |
398 | bnx2x_clear_sge_mask_next_elems(fp); |
399 | } |
400 | |
401 | DP(NETIF_MSG_RX_STATUS, |
402 | "fp->last_max_sge = %d fp->rx_sge_prod = %d\n" , |
403 | fp->last_max_sge, fp->rx_sge_prod); |
404 | } |
405 | |
406 | /* Get Toeplitz hash value in the skb using the value from the |
407 | * CQE (calculated by HW). |
408 | */ |
409 | static u32 bnx2x_get_rxhash(const struct bnx2x *bp, |
410 | const struct eth_fast_path_rx_cqe *cqe, |
411 | enum pkt_hash_types *rxhash_type) |
412 | { |
413 | /* Get Toeplitz hash from CQE */ |
414 | if ((bp->dev->features & NETIF_F_RXHASH) && |
415 | (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { |
416 | enum eth_rss_hash_type htype; |
417 | |
418 | htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; |
419 | *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) || |
420 | (htype == TCP_IPV6_HASH_TYPE)) ? |
421 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; |
422 | |
423 | return le32_to_cpu(cqe->rss_hash_result); |
424 | } |
425 | *rxhash_type = PKT_HASH_TYPE_NONE; |
426 | return 0; |
427 | } |
428 | |
429 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, |
430 | u16 cons, u16 prod, |
431 | struct eth_fast_path_rx_cqe *cqe) |
432 | { |
433 | struct bnx2x *bp = fp->bp; |
434 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; |
435 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; |
436 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; |
437 | dma_addr_t mapping; |
438 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; |
439 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; |
440 | |
441 | /* print error if current state != stop */ |
442 | if (tpa_info->tpa_state != BNX2X_TPA_STOP) |
443 | BNX2X_ERR("start of bin not in stop [%d]\n" , queue); |
444 | |
445 | /* Try to map an empty data buffer from the aggregation info */ |
446 | mapping = dma_map_single(&bp->pdev->dev, |
447 | first_buf->data + NET_SKB_PAD, |
448 | fp->rx_buf_size, DMA_FROM_DEVICE); |
449 | /* |
450 | * ...if it fails - move the skb from the consumer to the producer |
451 | * and set the current aggregation state as ERROR to drop it |
452 | * when TPA_STOP arrives. |
453 | */ |
454 | |
455 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
456 | /* Move the BD from the consumer to the producer */ |
457 | bnx2x_reuse_rx_data(fp, cons, prod); |
458 | tpa_info->tpa_state = BNX2X_TPA_ERROR; |
459 | return; |
460 | } |
461 | |
462 | /* move empty data from pool to prod */ |
463 | prod_rx_buf->data = first_buf->data; |
464 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); |
465 | /* point prod_bd to new data */ |
466 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
467 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
468 | |
469 | /* move partial skb from cons to pool (don't unmap yet) */ |
470 | *first_buf = *cons_rx_buf; |
471 | |
472 | /* mark bin state as START */ |
473 | tpa_info->parsing_flags = |
474 | le16_to_cpu(cqe->pars_flags.flags); |
475 | tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); |
476 | tpa_info->tpa_state = BNX2X_TPA_START; |
477 | tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); |
478 | tpa_info->placement_offset = cqe->placement_offset; |
479 | tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); |
480 | if (fp->mode == TPA_MODE_GRO) { |
481 | u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); |
482 | tpa_info->full_page = SGE_PAGES / gro_size * gro_size; |
483 | tpa_info->gro_size = gro_size; |
484 | } |
485 | |
486 | #ifdef BNX2X_STOP_ON_ERROR |
487 | fp->tpa_queue_used |= (1 << queue); |
488 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", |
489 | fp->tpa_queue_used); |
490 | #endif |
491 | } |
492 | |
493 | /* Timestamp option length allowed for TPA aggregation: |
494 | * |
495 | * nop nop kind length echo val |
496 | */ |
497 | #define TPA_TSTAMP_OPT_LEN 12 |
498 | /** |
499 | * bnx2x_set_gro_params - compute GRO values |
500 | * |
501 | * @skb: packet skb |
502 | * @parsing_flags: parsing flags from the START CQE |
503 | * @len_on_bd: total length of the first packet for the |
504 | * aggregation. |
505 | * @pkt_len: length of all segments |
506 | * @num_of_coalesced_segs: count of segments |
507 | * |
508 | * Approximate value of the MSS for this aggregation calculated using |
509 | * the first packet of it. |
510 | * Compute number of aggregated segments, and gso_type. |
511 | */ |
512 | static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, |
513 | u16 len_on_bd, unsigned int pkt_len, |
514 | u16 num_of_coalesced_segs) |
515 | { |
516 | /* TPA aggregation won't have either IP options or TCP options |
517 | * other than timestamp or IPv6 extension headers. |
518 | */ |
519 | u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); |
520 | |
521 | if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == |
522 | PRS_FLAG_OVERETH_IPV6) { |
523 | hdrs_len += sizeof(struct ipv6hdr); |
524 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
525 | } else { |
526 | hdrs_len += sizeof(struct iphdr); |
527 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
528 | } |
529 | |
530 | /* Check if there was a TCP timestamp; if there is, it will |
531 | * always be 12 bytes long: nop nop kind length echo val. |
532 | * |
533 | * Otherwise FW would close the aggregation. |
534 | */ |
535 | if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) |
536 | hdrs_len += TPA_TSTAMP_OPT_LEN; |
537 | |
538 | skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; |
539 | |
540 | /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count |
541 | * to skb_shinfo(skb)->gso_segs |
542 | */ |
543 | NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; |
544 | } |
545 | |
546 | static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
547 | u16 index, gfp_t gfp_mask) |
548 | { |
549 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; |
550 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; |
551 | struct bnx2x_alloc_pool *pool = &fp->page_pool; |
552 | dma_addr_t mapping; |
553 | |
554 | if (!pool->page) { |
555 | pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); |
556 | if (unlikely(!pool->page)) |
557 | return -ENOMEM; |
558 | |
559 | pool->offset = 0; |
560 | } |
561 | |
562 | mapping = dma_map_page(&bp->pdev->dev, pool->page, |
563 | pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE); |
564 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
565 | BNX2X_ERR("Can't map sge\n"); |
566 | return -ENOMEM; |
567 | } |
568 | |
569 | sw_buf->page = pool->page; |
570 | sw_buf->offset = pool->offset; |
571 | |
572 | dma_unmap_addr_set(sw_buf, mapping, mapping); |
573 | |
574 | sge->addr_hi = cpu_to_le32(U64_HI(mapping)); |
575 | sge->addr_lo = cpu_to_le32(U64_LO(mapping)); |
576 | |
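/* Advance within the pooled page. Take an extra reference while another
 * SGE-sized chunk still fits; otherwise the ring buffer keeps the last
 * reference and the pool moves on to a fresh page next time.
 */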
577 | pool->offset += SGE_PAGE_SIZE; |
578 | if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) |
579 | get_page(pool->page); |
580 | else |
581 | pool->page = NULL; |
582 | return 0; |
583 | } |
584 | |
585 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
586 | struct bnx2x_agg_info *tpa_info, |
587 | u16 pages, |
588 | struct sk_buff *skb, |
589 | struct eth_end_agg_rx_cqe *cqe, |
590 | u16 cqe_idx) |
591 | { |
592 | struct sw_rx_page *rx_pg, old_rx_pg; |
593 | u32 i, frag_len, frag_size; |
594 | int err, j, frag_id = 0; |
595 | u16 len_on_bd = tpa_info->len_on_bd; |
596 | u16 full_page = 0, gro_size = 0; |
597 | |
598 | frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; |
599 | |
600 | if (fp->mode == TPA_MODE_GRO) { |
601 | gro_size = tpa_info->gro_size; |
602 | full_page = tpa_info->full_page; |
603 | } |
604 | |
605 | /* This is needed in order to enable forwarding support */ |
606 | if (frag_size) |
607 | bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, |
608 | le16_to_cpu(cqe->pkt_len), |
609 | le16_to_cpu(cqe->num_of_coalesced_segs)); |
610 | |
611 | #ifdef BNX2X_STOP_ON_ERROR |
612 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { |
613 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", |
614 | pages, cqe_idx); |
615 | BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); |
616 | bnx2x_panic(); |
617 | return -EINVAL; |
618 | } |
619 | #endif |
620 | |
621 | /* Run through the SGL and compose the fragmented skb */ |
622 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { |
623 | u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); |
624 | |
625 | /* FW gives the indices of the SGE as if the ring is an array |
626 | (meaning that "next" element will consume 2 indices) */ |
627 | if (fp->mode == TPA_MODE_GRO) |
628 | frag_len = min_t(u32, frag_size, (u32)full_page); |
629 | else /* LRO */ |
630 | frag_len = min_t(u32, frag_size, (u32)SGE_PAGES); |
631 | |
632 | rx_pg = &fp->rx_page_ring[sge_idx]; |
633 | old_rx_pg = *rx_pg; |
634 | |
635 | /* If we fail to allocate a substitute page, we simply stop |
636 | where we are and drop the whole packet */ |
637 | err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); |
638 | if (unlikely(err)) { |
639 | bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; |
640 | return err; |
641 | } |
642 | |
643 | dma_unmap_page(&bp->pdev->dev, |
644 | dma_unmap_addr(&old_rx_pg, mapping), |
645 | SGE_PAGE_SIZE, DMA_FROM_DEVICE); |
646 | /* Add one frag and update the appropriate fields in the skb */ |
647 | if (fp->mode == TPA_MODE_LRO) |
648 | skb_fill_page_desc(skb, j, old_rx_pg.page, |
649 | old_rx_pg.offset, frag_len); |
650 | else { /* GRO */ |
651 | int rem; |
652 | int offset = 0; |
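/* Split the SGE page into gro_size-sized frags; every frag after
 * the first takes its own reference on the page.
 */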
653 | for (rem = frag_len; rem > 0; rem -= gro_size) { |
654 | int len = rem > gro_size ? gro_size : rem; |
655 | skb_fill_page_desc(skb, frag_id++, |
656 | old_rx_pg.page, |
657 | old_rx_pg.offset + offset, |
658 | len); |
659 | if (offset) |
660 | get_page(old_rx_pg.page); |
661 | offset += len; |
662 | } |
663 | } |
664 | |
665 | skb->data_len += frag_len; |
666 | skb->truesize += SGE_PAGES; |
667 | skb->len += frag_len; |
668 | |
669 | frag_size -= frag_len; |
670 | } |
671 | |
672 | return 0; |
673 | } |
674 | |
675 | static struct sk_buff * |
676 | bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data) |
677 | { |
678 | struct sk_buff *skb; |
679 | |
680 | if (fp->rx_frag_size) |
681 | skb = build_skb(data, fp->rx_frag_size); |
682 | else |
683 | skb = slab_build_skb(data); |
684 | return skb; |
685 | } |
686 | |
687 | static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) |
688 | { |
689 | if (fp->rx_frag_size) |
690 | skb_free_frag(data); |
691 | else |
692 | kfree(data); |
693 | } |
694 | |
695 | static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) |
696 | { |
697 | if (fp->rx_frag_size) { |
698 | /* GFP_KERNEL allocations are used only during initialization */ |
699 | if (unlikely(gfpflags_allow_blocking(gfp_mask))) |
700 | return (void *)__get_free_page(gfp_mask); |
701 | |
702 | return napi_alloc_frag(fp->rx_frag_size); |
703 | } |
704 | |
705 | return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); |
706 | } |
707 | |
708 | #ifdef CONFIG_INET |
709 | static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) |
710 | { |
711 | const struct iphdr *iph = ip_hdr(skb); |
712 | struct tcphdr *th; |
713 | |
714 | skb_set_transport_header(skb, sizeof(struct iphdr)); |
715 | th = tcp_hdr(skb); |
716 | |
717 | th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), |
718 | iph->saddr, iph->daddr, 0); |
719 | } |
720 | |
721 | static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) |
722 | { |
723 | struct ipv6hdr *iph = ipv6_hdr(skb); |
724 | struct tcphdr *th; |
725 | |
726 | skb_set_transport_header(skb, sizeof(struct ipv6hdr)); |
727 | th = tcp_hdr(skb); |
728 | |
729 | th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), |
730 | &iph->saddr, &iph->daddr, 0); |
731 | } |
732 | |
733 | static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, |
734 | void (*gro_func)(struct bnx2x*, struct sk_buff*)) |
735 | { |
736 | skb_reset_network_header(skb); |
737 | gro_func(bp, skb); |
738 | tcp_gro_complete(skb); |
739 | } |
740 | #endif |
741 | |
742 | static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
743 | struct sk_buff *skb) |
744 | { |
745 | #ifdef CONFIG_INET |
746 | if (skb_shinfo(skb)->gso_size) { |
747 | switch (be16_to_cpu(skb->protocol)) { |
748 | case ETH_P_IP: |
749 | bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); |
750 | break; |
751 | case ETH_P_IPV6: |
752 | bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); |
753 | break; |
754 | default: |
755 | netdev_WARN_ONCE(bp->dev, |
756 | "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", |
757 | be16_to_cpu(skb->protocol)); |
758 | } |
759 | } |
760 | #endif |
761 | skb_record_rx_queue(skb, fp->rx_queue); |
762 | napi_gro_receive(&fp->napi, skb); |
763 | } |
764 | |
765 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
766 | struct bnx2x_agg_info *tpa_info, |
767 | u16 pages, |
768 | struct eth_end_agg_rx_cqe *cqe, |
769 | u16 cqe_idx) |
770 | { |
771 | struct sw_rx_bd *rx_buf = &tpa_info->first_buf; |
772 | u8 pad = tpa_info->placement_offset; |
773 | u16 len = tpa_info->len_on_bd; |
774 | struct sk_buff *skb = NULL; |
775 | u8 *new_data, *data = rx_buf->data; |
776 | u8 old_tpa_state = tpa_info->tpa_state; |
777 | |
778 | tpa_info->tpa_state = BNX2X_TPA_STOP; |
779 | |
780 | /* If there was an error during the handling of the TPA_START - |
781 | * drop this aggregation. |
782 | */ |
783 | if (old_tpa_state == BNX2X_TPA_ERROR) |
784 | goto drop; |
785 | |
786 | /* Try to allocate the new data */ |
787 | new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC); |
788 | /* Unmap skb in the pool anyway, as we are going to change |
789 | pool entry status to BNX2X_TPA_STOP even if new skb allocation |
790 | fails. */ |
791 | dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), |
792 | fp->rx_buf_size, DMA_FROM_DEVICE); |
793 | if (likely(new_data)) |
794 | skb = bnx2x_build_skb(fp, data); |
795 | |
796 | if (likely(skb)) { |
797 | #ifdef BNX2X_STOP_ON_ERROR |
798 | if (pad + len > fp->rx_buf_size) { |
799 | BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n" , |
800 | pad, len, fp->rx_buf_size); |
801 | bnx2x_panic(); |
802 | bnx2x_frag_free(fp, new_data); |
803 | return; |
804 | } |
805 | #endif |
806 | |
807 | skb_reserve(skb, len: pad + NET_SKB_PAD); |
808 | skb_put(skb, len); |
809 | skb_set_hash(skb, hash: tpa_info->rxhash, type: tpa_info->rxhash_type); |
810 | |
811 | skb->protocol = eth_type_trans(skb, dev: bp->dev); |
812 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
813 | |
814 | if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, |
815 | skb, cqe, cqe_idx)) { |
816 | if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) |
817 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag); |
818 | bnx2x_gro_receive(bp, fp, skb); |
819 | } else { |
820 | DP(NETIF_MSG_RX_STATUS, |
821 | "Failed to allocate new pages - dropping packet!\n"); |
822 | dev_kfree_skb_any(skb); |
823 | } |
824 | |
825 | /* put new data in bin */ |
826 | rx_buf->data = new_data; |
827 | |
828 | return; |
829 | } |
830 | if (new_data) |
831 | bnx2x_frag_free(fp, new_data); |
832 | drop: |
833 | /* drop the packet and keep the buffer in the bin */ |
834 | DP(NETIF_MSG_RX_STATUS, |
835 | "Failed to allocate or map a new skb - dropping packet!\n"); |
836 | bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; |
837 | } |
838 | |
839 | static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
840 | u16 index, gfp_t gfp_mask) |
841 | { |
842 | u8 *data; |
843 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; |
844 | struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; |
845 | dma_addr_t mapping; |
846 | |
847 | data = bnx2x_frag_alloc(fp, gfp_mask); |
848 | if (unlikely(data == NULL)) |
849 | return -ENOMEM; |
850 | |
851 | mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, |
852 | fp->rx_buf_size, |
853 | DMA_FROM_DEVICE); |
854 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
855 | bnx2x_frag_free(fp, data); |
856 | BNX2X_ERR("Can't map rx data\n" ); |
857 | return -ENOMEM; |
858 | } |
859 | |
860 | rx_buf->data = data; |
861 | dma_unmap_addr_set(rx_buf, mapping, mapping); |
862 | |
863 | rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
864 | rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
865 | |
866 | return 0; |
867 | } |
868 | |
869 | static |
870 | void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, |
871 | struct bnx2x_fastpath *fp, |
872 | struct bnx2x_eth_q_stats *qstats) |
873 | { |
874 | /* Do nothing if no L4 csum validation was done. |
875 | * We do not check whether IP csum was validated. For IPv4 we assume |
876 | * that if the card got as far as validating the L4 csum, it also |
877 | * validated the IP csum. IPv6 has no IP csum. |
878 | */ |
879 | if (cqe->fast_path_cqe.status_flags & |
880 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) |
881 | return; |
882 | |
883 | /* If L4 validation was done, check if an error was found. */ |
884 | |
885 | if (cqe->fast_path_cqe.type_error_flags & |
886 | (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | |
887 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) |
888 | qstats->hw_csum_err++; |
889 | else |
890 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
891 | } |
892 | |
893 | static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) |
894 | { |
895 | struct bnx2x *bp = fp->bp; |
896 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; |
897 | u16 sw_comp_cons, sw_comp_prod; |
898 | int rx_pkt = 0; |
899 | union eth_rx_cqe *cqe; |
900 | struct eth_fast_path_rx_cqe *cqe_fp; |
901 | |
902 | #ifdef BNX2X_STOP_ON_ERROR |
903 | if (unlikely(bp->panic)) |
904 | return 0; |
905 | #endif |
906 | if (budget <= 0) |
907 | return rx_pkt; |
908 | |
909 | bd_cons = fp->rx_bd_cons; |
910 | bd_prod = fp->rx_bd_prod; |
911 | bd_prod_fw = bd_prod; |
912 | sw_comp_cons = fp->rx_comp_cons; |
913 | sw_comp_prod = fp->rx_comp_prod; |
914 | |
915 | comp_ring_cons = RCQ_BD(sw_comp_cons); |
916 | cqe = &fp->rx_comp_ring[comp_ring_cons]; |
917 | cqe_fp = &cqe->fast_path_cqe; |
918 | |
919 | DP(NETIF_MSG_RX_STATUS, |
920 | "queue[%d]: sw_comp_cons %u\n" , fp->index, sw_comp_cons); |
921 | |
922 | while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) { |
923 | struct sw_rx_bd *rx_buf = NULL; |
924 | struct sk_buff *skb; |
925 | u8 cqe_fp_flags; |
926 | enum eth_rx_cqe_type cqe_fp_type; |
927 | u16 len, pad, queue; |
928 | u8 *data; |
929 | u32 rxhash; |
930 | enum pkt_hash_types rxhash_type; |
931 | |
932 | #ifdef BNX2X_STOP_ON_ERROR |
933 | if (unlikely(bp->panic)) |
934 | return 0; |
935 | #endif |
936 | |
937 | bd_prod = RX_BD(bd_prod); |
938 | bd_cons = RX_BD(bd_cons); |
939 | |
940 | /* A rmb() is required to ensure that the CQE is not read |
941 | * before it is written by the adapter DMA. PCI ordering |
942 | * rules will make sure the other fields are written before |
943 | * the marker at the end of struct eth_fast_path_rx_cqe |
944 | * but without rmb() a weakly ordered processor can process |
945 | * stale data. Without the barrier TPA state-machine might |
946 | * enter inconsistent state and kernel stack might be |
947 | * provided with incorrect packet description - these lead |
948 | * to various kernel crashes. |
949 | */ |
950 | rmb(); |
951 | |
952 | cqe_fp_flags = cqe_fp->type_error_flags; |
953 | cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; |
954 | |
955 | DP(NETIF_MSG_RX_STATUS, |
956 | "CQE type %x err %x status %x queue %x vlan %x len %u\n" , |
957 | CQE_TYPE(cqe_fp_flags), |
958 | cqe_fp_flags, cqe_fp->status_flags, |
959 | le32_to_cpu(cqe_fp->rss_hash_result), |
960 | le16_to_cpu(cqe_fp->vlan_tag), |
961 | le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len)); |
962 | |
963 | /* is this a slowpath msg? */ |
964 | if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { |
965 | bnx2x_sp_event(fp, cqe); |
966 | goto next_cqe; |
967 | } |
968 | |
969 | rx_buf = &fp->rx_buf_ring[bd_cons]; |
970 | data = rx_buf->data; |
971 | |
972 | if (!CQE_TYPE_FAST(cqe_fp_type)) { |
973 | struct bnx2x_agg_info *tpa_info; |
974 | u16 frag_size, pages; |
975 | #ifdef BNX2X_STOP_ON_ERROR |
976 | /* sanity check */ |
977 | if (fp->mode == TPA_MODE_DISABLED && |
978 | (CQE_TYPE_START(cqe_fp_type) || |
979 | CQE_TYPE_STOP(cqe_fp_type))) |
980 | BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n" , |
981 | CQE_TYPE(cqe_fp_type)); |
982 | #endif |
983 | |
984 | if (CQE_TYPE_START(cqe_fp_type)) { |
985 | u16 queue = cqe_fp->queue_index; |
986 | DP(NETIF_MSG_RX_STATUS, |
987 | "calling tpa_start on queue %d\n" , |
988 | queue); |
989 | |
990 | bnx2x_tpa_start(fp, queue, |
991 | cons: bd_cons, prod: bd_prod, |
992 | cqe: cqe_fp); |
993 | |
994 | goto next_rx; |
995 | } |
996 | queue = cqe->end_agg_cqe.queue_index; |
997 | tpa_info = &fp->tpa_info[queue]; |
998 | DP(NETIF_MSG_RX_STATUS, |
999 | "calling tpa_stop on queue %d\n" , |
1000 | queue); |
1001 | |
1002 | frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) - |
1003 | tpa_info->len_on_bd; |
1004 | |
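/* Number of SGE pages spanned by the remaining aggregated data */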
1005 | if (fp->mode == TPA_MODE_GRO) |
1006 | pages = (frag_size + tpa_info->full_page - 1) / |
1007 | tpa_info->full_page; |
1008 | else |
1009 | pages = SGE_PAGE_ALIGN(frag_size) >> |
1010 | SGE_PAGE_SHIFT; |
1011 | |
1012 | bnx2x_tpa_stop(bp, fp, tpa_info, pages, |
1013 | &cqe->end_agg_cqe, comp_ring_cons); |
1014 | #ifdef BNX2X_STOP_ON_ERROR |
1015 | if (bp->panic) |
1016 | return 0; |
1017 | #endif |
1018 | |
1019 | bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); |
1020 | goto next_cqe; |
1021 | } |
1022 | /* non TPA */ |
1023 | len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len); |
1024 | pad = cqe_fp->placement_offset; |
1025 | dma_sync_single_for_cpu(&bp->pdev->dev, |
1026 | dma_unmap_addr(rx_buf, mapping), |
1027 | pad + RX_COPY_THRESH, |
1028 | DMA_FROM_DEVICE); |
1029 | pad += NET_SKB_PAD; |
1030 | prefetch(data + pad); /* speedup eth_type_trans() */ |
1031 | /* is this an error packet? */ |
1032 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { |
1033 | DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, |
1034 | "ERROR flags %x rx packet %u\n", |
1035 | cqe_fp_flags, sw_comp_cons); |
1036 | bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; |
1037 | goto reuse_rx; |
1038 | } |
1039 | |
1040 | /* Since we don't have a jumbo ring |
1041 | * copy small packets if mtu > 1500 |
1042 | */ |
1043 | if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && |
1044 | (len <= RX_COPY_THRESH)) { |
1045 | skb = napi_alloc_skb(&fp->napi, len); |
1046 | if (skb == NULL) { |
1047 | DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, |
1048 | "ERROR packet dropped because of alloc failure\n"); |
1049 | bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; |
1050 | goto reuse_rx; |
1051 | } |
1052 | memcpy(skb->data, data + pad, len); |
1053 | bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); |
1054 | } else { |
1055 | if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, |
1056 | GFP_ATOMIC) == 0)) { |
1057 | dma_unmap_single(&bp->pdev->dev, |
1058 | dma_unmap_addr(rx_buf, mapping), |
1059 | fp->rx_buf_size, |
1060 | DMA_FROM_DEVICE); |
1061 | skb = bnx2x_build_skb(fp, data); |
1062 | if (unlikely(!skb)) { |
1063 | bnx2x_frag_free(fp, data); |
1064 | bnx2x_fp_qstats(bp, fp)-> |
1065 | rx_skb_alloc_failed++; |
1066 | goto next_rx; |
1067 | } |
1068 | skb_reserve(skb, pad); |
1069 | } else { |
1070 | DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, |
1071 | "ERROR packet dropped because of alloc failure\n"); |
1072 | bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; |
1073 | reuse_rx: |
1074 | bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); |
1075 | goto next_rx; |
1076 | } |
1077 | } |
1078 | |
1079 | skb_put(skb, len); |
1080 | skb->protocol = eth_type_trans(skb, bp->dev); |
1081 | |
1082 | /* Set Toeplitz hash for a non-LRO skb */ |
1083 | rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); |
1084 | skb_set_hash(skb, rxhash, rxhash_type); |
1085 | |
1086 | skb_checksum_none_assert(skb); |
1087 | |
1088 | if (bp->dev->features & NETIF_F_RXCSUM) |
1089 | bnx2x_csum_validate(skb, cqe, fp, |
1090 | bnx2x_fp_qstats(bp, fp)); |
1091 | |
1092 | skb_record_rx_queue(skb, fp->rx_queue); |
1093 | |
1094 | /* Check if this packet was timestamped */ |
1095 | if (unlikely(cqe->fast_path_cqe.type_error_flags & |
1096 | (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT))) |
1097 | bnx2x_set_rx_ts(bp, skb); |
1098 | |
1099 | if (le16_to_cpu(cqe_fp->pars_flags.flags) & |
1100 | PARSING_FLAGS_VLAN) |
1101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
1102 | le16_to_cpu(cqe_fp->vlan_tag)); |
1103 | |
1104 | napi_gro_receive(&fp->napi, skb); |
1105 | next_rx: |
1106 | rx_buf->data = NULL; |
1107 | |
1108 | bd_cons = NEXT_RX_IDX(bd_cons); |
1109 | bd_prod = NEXT_RX_IDX(bd_prod); |
1110 | bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); |
1111 | rx_pkt++; |
1112 | next_cqe: |
1113 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); |
1114 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); |
1115 | |
1116 | /* mark CQE as free */ |
1117 | BNX2X_SEED_CQE(cqe_fp); |
1118 | |
1119 | if (rx_pkt == budget) |
1120 | break; |
1121 | |
1122 | comp_ring_cons = RCQ_BD(sw_comp_cons); |
1123 | cqe = &fp->rx_comp_ring[comp_ring_cons]; |
1124 | cqe_fp = &cqe->fast_path_cqe; |
1125 | } /* while */ |
1126 | |
1127 | fp->rx_bd_cons = bd_cons; |
1128 | fp->rx_bd_prod = bd_prod_fw; |
1129 | fp->rx_comp_cons = sw_comp_cons; |
1130 | fp->rx_comp_prod = sw_comp_prod; |
1131 | |
1132 | /* Update producers */ |
1133 | bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, |
1134 | fp->rx_sge_prod); |
1135 | |
1136 | return rx_pkt; |
1137 | } |
1138 | |
1139 | static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) |
1140 | { |
1141 | struct bnx2x_fastpath *fp = fp_cookie; |
1142 | struct bnx2x *bp = fp->bp; |
1143 | u8 cos; |
1144 | |
1145 | DP(NETIF_MSG_INTR, |
1146 | "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n" , |
1147 | fp->index, fp->fw_sb_id, fp->igu_sb_id); |
1148 | |
1149 | bnx2x_ack_sb(bp, igu_sb_id: fp->igu_sb_id, storm: USTORM_ID, index: 0, op: IGU_INT_DISABLE, update: 0); |
1150 | |
1151 | #ifdef BNX2X_STOP_ON_ERROR |
1152 | if (unlikely(bp->panic)) |
1153 | return IRQ_HANDLED; |
1154 | #endif |
1155 | |
1156 | /* Handle Rx and Tx according to MSI-X vector */ |
1157 | for_each_cos_in_tx_queue(fp, cos) |
1158 | prefetch(fp->txdata_ptr[cos]->tx_cons_sb); |
1159 | |
1160 | prefetch(&fp->sb_running_index[SM_RX_ID]); |
1161 | napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); |
1162 | |
1163 | return IRQ_HANDLED; |
1164 | } |
1165 | |
1166 | /* HW Lock for shared dual port PHYs */ |
1167 | void bnx2x_acquire_phy_lock(struct bnx2x *bp) |
1168 | { |
1169 | mutex_lock(&bp->port.phy_mutex); |
1170 | |
1171 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); |
1172 | } |
1173 | |
1174 | void bnx2x_release_phy_lock(struct bnx2x *bp) |
1175 | { |
1176 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); |
1177 | |
1178 | mutex_unlock(&bp->port.phy_mutex); |
1179 | } |
1180 | |
1181 | /* calculates MF speed according to current linespeed and MF configuration */ |
1182 | u16 bnx2x_get_mf_speed(struct bnx2x *bp) |
1183 | { |
1184 | u16 line_speed = bp->link_vars.line_speed; |
1185 | if (IS_MF(bp)) { |
1186 | u16 maxCfg = bnx2x_extract_max_cfg(bp, |
1187 | bp->mf_config[BP_VN(bp)]); |
1188 | |
1189 | /* Calculate the current MAX line speed limit for the MF |
1190 | * devices |
1191 | */ |
1192 | if (IS_MF_PERCENT_BW(bp)) |
1193 | line_speed = (line_speed * maxCfg) / 100; |
1194 | else { /* SD mode */ |
1195 | u16 vn_max_rate = maxCfg * 100; |
1196 | |
1197 | if (vn_max_rate < line_speed) |
1198 | line_speed = vn_max_rate; |
1199 | } |
1200 | } |
1201 | |
1202 | return line_speed; |
1203 | } |
1204 | |
1205 | /** |
1206 | * bnx2x_fill_report_data - fill link report data to report |
1207 | * |
1208 | * @bp: driver handle |
1209 | * @data: link state to update |
1210 | * |
1211 | * It uses non-atomic bit operations because it is called under the mutex. |
1212 | */ |
1213 | static void bnx2x_fill_report_data(struct bnx2x *bp, |
1214 | struct bnx2x_link_report_data *data) |
1215 | { |
1216 | memset(data, 0, sizeof(*data)); |
1217 | |
1218 | if (IS_PF(bp)) { |
1219 | /* Fill the report data: effective line speed */ |
1220 | data->line_speed = bnx2x_get_mf_speed(bp); |
1221 | |
1222 | /* Link is down */ |
1223 | if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) |
1224 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
1225 | &data->link_report_flags); |
1226 | |
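/* No ETH queues configured - report the link as down as well */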
1227 | if (!BNX2X_NUM_ETH_QUEUES(bp)) |
1228 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
1229 | &data->link_report_flags); |
1230 | |
1231 | /* Full DUPLEX */ |
1232 | if (bp->link_vars.duplex == DUPLEX_FULL) |
1233 | __set_bit(BNX2X_LINK_REPORT_FD, |
1234 | &data->link_report_flags); |
1235 | |
1236 | /* Rx Flow Control is ON */ |
1237 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) |
1238 | __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, |
1239 | &data->link_report_flags); |
1240 | |
1241 | /* Tx Flow Control is ON */ |
1242 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) |
1243 | __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, |
1244 | &data->link_report_flags); |
1245 | } else { /* VF */ |
1246 | *data = bp->vf_link_vars; |
1247 | } |
1248 | } |
1249 | |
1250 | /** |
1251 | * bnx2x_link_report - report link status to OS. |
1252 | * |
1253 | * @bp: driver handle |
1254 | * |
1255 | * Calls the __bnx2x_link_report() under the same locking scheme |
1256 | * as a link/PHY state managing code to ensure a consistent link |
1257 | * reporting. |
1258 | */ |
1259 | |
1260 | void bnx2x_link_report(struct bnx2x *bp) |
1261 | { |
1262 | bnx2x_acquire_phy_lock(bp); |
1263 | __bnx2x_link_report(bp); |
1264 | bnx2x_release_phy_lock(bp); |
1265 | } |
1266 | |
1267 | /** |
1268 | * __bnx2x_link_report - report link status to OS. |
1269 | * |
1270 | * @bp: driver handle |
1271 | * |
1272 | * Non-atomic implementation. |
1273 | * Should be called under the phy_lock. |
1274 | */ |
1275 | void __bnx2x_link_report(struct bnx2x *bp) |
1276 | { |
1277 | struct bnx2x_link_report_data cur_data; |
1278 | |
1279 | if (bp->force_link_down) { |
1280 | bp->link_vars.link_up = 0; |
1281 | return; |
1282 | } |
1283 | |
1284 | /* reread mf_cfg */ |
1285 | if (IS_PF(bp) && !CHIP_IS_E1(bp)) |
1286 | bnx2x_read_mf_cfg(bp); |
1287 | |
1288 | /* Read the current link report info */ |
1289 | bnx2x_fill_report_data(bp, &cur_data); |
1290 | |
1291 | /* Don't report link down or exactly the same link status twice */ |
1292 | if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || |
1293 | (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
1294 | &bp->last_reported_link.link_report_flags) && |
1295 | test_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
1296 | &cur_data.link_report_flags))) |
1297 | return; |
1298 | |
1299 | bp->link_cnt++; |
1300 | |
1301 | /* We are going to report new link parameters now - |
1302 | * remember the current data for the next time. |
1303 | */ |
1304 | memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); |
1305 | |
1306 | /* propagate status to VFs */ |
1307 | if (IS_PF(bp)) |
1308 | bnx2x_iov_link_update(bp); |
1309 | |
1310 | if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
1311 | &cur_data.link_report_flags)) { |
1312 | netif_carrier_off(bp->dev); |
1313 | netdev_err(bp->dev, "NIC Link is Down\n"); |
1314 | return; |
1315 | } else { |
1316 | const char *duplex; |
1317 | const char *flow; |
1318 | |
1319 | netif_carrier_on(bp->dev); |
1320 | |
1321 | if (test_and_clear_bit(BNX2X_LINK_REPORT_FD, |
1322 | &cur_data.link_report_flags)) |
1323 | duplex = "full"; |
1324 | else |
1325 | duplex = "half"; |
1326 | |
1327 | /* Handle the FC at the end so that only these flags would be |
1328 | * possibly set. This way we may easily check if there is no FC |
1329 | * enabled. |
1330 | */ |
1331 | if (cur_data.link_report_flags) { |
1332 | if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, |
1333 | &cur_data.link_report_flags)) { |
1334 | if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, |
1335 | &cur_data.link_report_flags)) |
1336 | flow = "ON - receive & transmit" ; |
1337 | else |
1338 | flow = "ON - receive" ; |
1339 | } else { |
1340 | flow = "ON - transmit" ; |
1341 | } |
1342 | } else { |
1343 | flow = "none" ; |
1344 | } |
1345 | netdev_info(dev: bp->dev, format: "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n" , |
1346 | cur_data.line_speed, duplex, flow); |
1347 | } |
1348 | } |
1349 | |
1350 | static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) |
1351 | { |
1352 | int i; |
1353 | |
1354 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { |
1355 | struct eth_rx_sge *sge; |
1356 | |
1357 | sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; |
1358 | sge->addr_hi = |
1359 | cpu_to_le32(U64_HI(fp->rx_sge_mapping + |
1360 | BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); |
1361 | |
1362 | sge->addr_lo = |
1363 | cpu_to_le32(U64_LO(fp->rx_sge_mapping + |
1364 | BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); |
1365 | } |
1366 | } |
1367 | |
1368 | static void bnx2x_free_tpa_pool(struct bnx2x *bp, |
1369 | struct bnx2x_fastpath *fp, int last) |
1370 | { |
1371 | int i; |
1372 | |
1373 | for (i = 0; i < last; i++) { |
1374 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; |
1375 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; |
1376 | u8 *data = first_buf->data; |
1377 | |
1378 | if (data == NULL) { |
1379 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n" , i); |
1380 | continue; |
1381 | } |
1382 | if (tpa_info->tpa_state == BNX2X_TPA_START) |
1383 | dma_unmap_single(&bp->pdev->dev, |
1384 | dma_unmap_addr(first_buf, mapping), |
1385 | fp->rx_buf_size, DMA_FROM_DEVICE); |
1386 | bnx2x_frag_free(fp, data); |
1387 | first_buf->data = NULL; |
1388 | } |
1389 | } |
1390 | |
1391 | void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) |
1392 | { |
1393 | int j; |
1394 | |
1395 | for_each_rx_queue_cnic(bp, j) { |
1396 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
1397 | |
1398 | fp->rx_bd_cons = 0; |
1399 | |
1400 | /* Activate BD ring */ |
1401 | /* Warning! |
1402 | * this will generate an interrupt (to the TSTORM) |
1403 | * must only be done after chip is initialized |
1404 | */ |
1405 | bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, |
1406 | fp->rx_sge_prod); |
1407 | } |
1408 | } |
1409 | |
1410 | void bnx2x_init_rx_rings(struct bnx2x *bp) |
1411 | { |
1412 | int func = BP_FUNC(bp); |
1413 | u16 ring_prod; |
1414 | int i, j; |
1415 | |
1416 | /* Allocate TPA resources */ |
1417 | for_each_eth_queue(bp, j) { |
1418 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
1419 | |
1420 | DP(NETIF_MSG_IFUP, |
1421 | "mtu %d rx_buf_size %d\n" , bp->dev->mtu, fp->rx_buf_size); |
1422 | |
1423 | if (fp->mode != TPA_MODE_DISABLED) { |
1424 | /* Fill the per-aggregation pool */ |
1425 | for (i = 0; i < MAX_AGG_QS(bp); i++) { |
1426 | struct bnx2x_agg_info *tpa_info = |
1427 | &fp->tpa_info[i]; |
1428 | struct sw_rx_bd *first_buf = |
1429 | &tpa_info->first_buf; |
1430 | |
1431 | first_buf->data = |
1432 | bnx2x_frag_alloc(fp, GFP_KERNEL); |
1433 | if (!first_buf->data) { |
1434 | BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n" , |
1435 | j); |
1436 | bnx2x_free_tpa_pool(bp, fp, last: i); |
1437 | fp->mode = TPA_MODE_DISABLED; |
1438 | break; |
1439 | } |
1440 | dma_unmap_addr_set(first_buf, mapping, 0); |
1441 | tpa_info->tpa_state = BNX2X_TPA_STOP; |
1442 | } |
1443 | |
1444 | /* "next page" elements initialization */ |
1445 | bnx2x_set_next_page_sgl(fp); |
1446 | |
1447 | /* set SGEs bit mask */ |
1448 | bnx2x_init_sge_ring_bit_mask(fp); |
1449 | |
1450 | /* Allocate SGEs and initialize the ring elements */ |
1451 | for (i = 0, ring_prod = 0; |
1452 | i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { |
1453 | |
1454 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, |
1455 | GFP_KERNEL) < 0) { |
1456 | BNX2X_ERR("was only able to allocate %d rx sges\n", |
1457 | i); |
1458 | BNX2X_ERR("disabling TPA for queue[%d]\n", |
1459 | j); |
1460 | /* Cleanup already allocated elements */ |
1461 | bnx2x_free_rx_sge_range(bp, fp, |
1462 | ring_prod); |
1463 | bnx2x_free_tpa_pool(bp, fp, |
1464 | MAX_AGG_QS(bp)); |
1465 | fp->mode = TPA_MODE_DISABLED; |
1466 | ring_prod = 0; |
1467 | break; |
1468 | } |
1469 | ring_prod = NEXT_SGE_IDX(ring_prod); |
1470 | } |
1471 | |
1472 | fp->rx_sge_prod = ring_prod; |
1473 | } |
1474 | } |
1475 | |
1476 | for_each_eth_queue(bp, j) { |
1477 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
1478 | |
1479 | fp->rx_bd_cons = 0; |
1480 | |
1481 | /* Activate BD ring */ |
1482 | /* Warning! |
1483 | * this will generate an interrupt (to the TSTORM) |
1484 | * must only be done after chip is initialized |
1485 | */ |
1486 | bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, |
1487 | fp->rx_sge_prod); |
1488 | |
1489 | if (j != 0) |
1490 | continue; |
1491 | |
1492 | if (CHIP_IS_E1(bp)) { |
1493 | REG_WR(bp, BAR_USTRORM_INTMEM + |
1494 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), |
1495 | U64_LO(fp->rx_comp_mapping)); |
1496 | REG_WR(bp, BAR_USTRORM_INTMEM + |
1497 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, |
1498 | U64_HI(fp->rx_comp_mapping)); |
1499 | } |
1500 | } |
1501 | } |
1502 | |
1503 | static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp) |
1504 | { |
1505 | u8 cos; |
1506 | struct bnx2x *bp = fp->bp; |
1507 | |
1508 | for_each_cos_in_tx_queue(fp, cos) { |
1509 | struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; |
1510 | unsigned pkts_compl = 0, bytes_compl = 0; |
1511 | |
1512 | u16 sw_prod = txdata->tx_pkt_prod; |
1513 | u16 sw_cons = txdata->tx_pkt_cons; |
1514 | |
1515 | while (sw_cons != sw_prod) { |
1516 | bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), |
1517 | pkts_compl: &pkts_compl, bytes_compl: &bytes_compl); |
1518 | sw_cons++; |
1519 | } |
1520 | |
1521 | netdev_tx_reset_queue( |
1522 | q: netdev_get_tx_queue(dev: bp->dev, |
1523 | index: txdata->txq_index)); |
1524 | } |
1525 | } |
1526 | |
1527 | static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) |
1528 | { |
1529 | int i; |
1530 | |
1531 | for_each_tx_queue_cnic(bp, i) { |
1532 | bnx2x_free_tx_skbs_queue(&bp->fp[i]); |
1533 | } |
1534 | } |
1535 | |
1536 | static void bnx2x_free_tx_skbs(struct bnx2x *bp) |
1537 | { |
1538 | int i; |
1539 | |
1540 | for_each_eth_queue(bp, i) { |
1541 | bnx2x_free_tx_skbs_queue(&bp->fp[i]); |
1542 | } |
1543 | } |
1544 | |
1545 | static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) |
1546 | { |
1547 | struct bnx2x *bp = fp->bp; |
1548 | int i; |
1549 | |
1550 | /* ring wasn't allocated */ |
1551 | if (fp->rx_buf_ring == NULL) |
1552 | return; |
1553 | |
1554 | for (i = 0; i < NUM_RX_BD; i++) { |
1555 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; |
1556 | u8 *data = rx_buf->data; |
1557 | |
1558 | if (data == NULL) |
1559 | continue; |
1560 | dma_unmap_single(&bp->pdev->dev, |
1561 | dma_unmap_addr(rx_buf, mapping), |
1562 | fp->rx_buf_size, DMA_FROM_DEVICE); |
1563 | |
1564 | rx_buf->data = NULL; |
1565 | bnx2x_frag_free(fp, data); |
1566 | } |
1567 | } |
1568 | |
1569 | static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) |
1570 | { |
1571 | int j; |
1572 | |
1573 | for_each_rx_queue_cnic(bp, j) { |
1574 | bnx2x_free_rx_bds(&bp->fp[j]); |
1575 | } |
1576 | } |
1577 | |
1578 | static void bnx2x_free_rx_skbs(struct bnx2x *bp) |
1579 | { |
1580 | int j; |
1581 | |
1582 | for_each_eth_queue(bp, j) { |
1583 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
1584 | |
1585 | bnx2x_free_rx_bds(fp); |
1586 | |
1587 | if (fp->mode != TPA_MODE_DISABLED) |
1588 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); |
1589 | } |
1590 | } |
1591 | |
1592 | static void bnx2x_free_skbs_cnic(struct bnx2x *bp) |
1593 | { |
1594 | bnx2x_free_tx_skbs_cnic(bp); |
1595 | bnx2x_free_rx_skbs_cnic(bp); |
1596 | } |
1597 | |
1598 | void bnx2x_free_skbs(struct bnx2x *bp) |
1599 | { |
1600 | bnx2x_free_tx_skbs(bp); |
1601 | bnx2x_free_rx_skbs(bp); |
1602 | } |
1603 | |
1604 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) |
1605 | { |
1606 | /* load old values */ |
1607 | u32 mf_cfg = bp->mf_config[BP_VN(bp)]; |
1608 | |
1609 | if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { |
1610 | /* leave all but MAX value */ |
1611 | mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; |
1612 | |
1613 | /* set new MAX value */ |
1614 | mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) |
1615 | & FUNC_MF_CFG_MAX_BW_MASK; |
1616 | |
1617 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); |
1618 | } |
1619 | } |
1620 | |
1621 | /** |
1622 | * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors |
1623 | * |
1624 | * @bp: driver handle |
1625 | * @nvecs: number of vectors to be released |
1626 | */ |
1627 | static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) |
1628 | { |
1629 | int i, offset = 0; |
1630 | |
1631 | if (nvecs == offset) |
1632 | return; |
1633 | |
1634 | /* VFs don't have a default SB */ |
1635 | if (IS_PF(bp)) { |
1636 | free_irq(bp->msix_table[offset].vector, bp->dev); |
1637 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n" , |
1638 | bp->msix_table[offset].vector); |
1639 | offset++; |
1640 | } |
1641 | |
1642 | if (CNIC_SUPPORT(bp)) { |
1643 | if (nvecs == offset) |
1644 | return; |
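/* The CNIC vector is not requested by bnx2x itself, so just
 * skip over its slot here.
 */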
1645 | offset++; |
1646 | } |
1647 | |
1648 | for_each_eth_queue(bp, i) { |
1649 | if (nvecs == offset) |
1650 | return; |
1651 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n" , |
1652 | i, bp->msix_table[offset].vector); |
1653 | |
1654 | free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); |
1655 | } |
1656 | } |
1657 | |
1658 | void bnx2x_free_irq(struct bnx2x *bp) |
1659 | { |
1660 | if (bp->flags & USING_MSIX_FLAG && |
1661 | !(bp->flags & USING_SINGLE_MSIX_FLAG)) { |
1662 | int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); |
1663 | |
1664 | /* vfs don't have a default status block */ |
1665 | if (IS_PF(bp)) |
1666 | nvecs++; |
1667 | |
1668 | bnx2x_free_msix_irqs(bp, nvecs); |
1669 | } else { |
1670 | free_irq(bp->dev->irq, bp->dev); |
1671 | } |
1672 | } |
1673 | |
1674 | int bnx2x_enable_msix(struct bnx2x *bp) |
1675 | { |
1676 | int msix_vec = 0, i, rc; |
1677 | |
1678 | /* VFs don't have a default status block */ |
1679 | if (IS_PF(bp)) { |
1680 | bp->msix_table[msix_vec].entry = msix_vec; |
1681 | BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n" , |
1682 | bp->msix_table[0].entry); |
1683 | msix_vec++; |
1684 | } |
1685 | |
1686 | /* Cnic requires an msix vector for itself */ |
1687 | if (CNIC_SUPPORT(bp)) { |
1688 | bp->msix_table[msix_vec].entry = msix_vec; |
1689 | BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n" , |
1690 | msix_vec, bp->msix_table[msix_vec].entry); |
1691 | msix_vec++; |
1692 | } |
1693 | |
1694 | /* We need separate vectors for ETH queues only (not FCoE) */ |
1695 | for_each_eth_queue(bp, i) { |
1696 | bp->msix_table[msix_vec].entry = msix_vec; |
1697 | BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n" , |
1698 | msix_vec, msix_vec, i); |
1699 | msix_vec++; |
1700 | } |
1701 | |
1702 | DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n" , |
1703 | msix_vec); |
1704 | |
1705 | rc = pci_enable_msix_range(dev: bp->pdev, entries: &bp->msix_table[0], |
1706 | BNX2X_MIN_MSIX_VEC_CNT(bp), maxvec: msix_vec); |
1707 | /* |
1708 | * reconfigure number of tx/rx queues according to available |
1709 | * MSI-X vectors |
1710 | */ |
1711 | if (rc == -ENOSPC) { |
1712 | /* Get by with single vector */ |
1713 | rc = pci_enable_msix_range(dev: bp->pdev, entries: &bp->msix_table[0], minvec: 1, maxvec: 1); |
1714 | if (rc < 0) { |
1715 | BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n" , |
1716 | rc); |
1717 | goto no_msix; |
1718 | } |
1719 | |
1720 | BNX2X_DEV_INFO("Using single MSI-X vector\n" ); |
1721 | bp->flags |= USING_SINGLE_MSIX_FLAG; |
1722 | |
1723 | BNX2X_DEV_INFO("set number of queues to 1\n" ); |
1724 | bp->num_ethernet_queues = 1; |
1725 | bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; |
1726 | } else if (rc < 0) { |
1727 | BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n" , rc); |
1728 | goto no_msix; |
1729 | } else if (rc < msix_vec) { |
		/* how many fewer vectors will we have? */
1731 | int diff = msix_vec - rc; |
1732 | |
1733 | BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n" , rc); |
1734 | |
1735 | /* |
1736 | * decrease number of queues by number of unallocated entries |
1737 | */ |
1738 | bp->num_ethernet_queues -= diff; |
1739 | bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; |
1740 | |
1741 | BNX2X_DEV_INFO("New queue configuration set: %d\n" , |
1742 | bp->num_queues); |
1743 | } |
1744 | |
1745 | bp->flags |= USING_MSIX_FLAG; |
1746 | |
1747 | return 0; |
1748 | |
1749 | no_msix: |
	/* fall back to INTx if not enough memory */
1751 | if (rc == -ENOMEM) |
1752 | bp->flags |= DISABLE_MSI_FLAG; |
1753 | |
1754 | return rc; |
1755 | } |
1756 | |
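/* Request the previously enabled MSI-X vectors: the slowpath vector
 * (PF only), skipping the CNIC slot, then one vector per ETH fastpath.
 * On failure, all vectors requested so far are released.
 */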
1757 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) |
1758 | { |
1759 | int i, rc, offset = 0; |
1760 | |
1761 | /* no default status block for vf */ |
1762 | if (IS_PF(bp)) { |
1763 | rc = request_irq(irq: bp->msix_table[offset++].vector, |
1764 | handler: bnx2x_msix_sp_int, flags: 0, |
1765 | name: bp->dev->name, dev: bp->dev); |
1766 | if (rc) { |
1767 | BNX2X_ERR("request sp irq failed\n" ); |
1768 | return -EBUSY; |
1769 | } |
1770 | } |
1771 | |
1772 | if (CNIC_SUPPORT(bp)) |
1773 | offset++; |
1774 | |
1775 | for_each_eth_queue(bp, i) { |
1776 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1777 | snprintf(buf: fp->name, size: sizeof(fp->name), fmt: "%s-fp-%d" , |
1778 | bp->dev->name, i); |
1779 | |
1780 | rc = request_irq(irq: bp->msix_table[offset].vector, |
1781 | handler: bnx2x_msix_fp_int, flags: 0, name: fp->name, dev: fp); |
1782 | if (rc) { |
1783 | BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n" , i, |
1784 | bp->msix_table[offset].vector, rc); |
1785 | bnx2x_free_msix_irqs(bp, nvecs: offset); |
1786 | return -EBUSY; |
1787 | } |
1788 | |
1789 | offset++; |
1790 | } |
1791 | |
1792 | i = BNX2X_NUM_ETH_QUEUES(bp); |
1793 | if (IS_PF(bp)) { |
1794 | offset = 1 + CNIC_SUPPORT(bp); |
1795 | netdev_info(dev: bp->dev, |
1796 | format: "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n" , |
1797 | bp->msix_table[0].vector, |
1798 | 0, bp->msix_table[offset].vector, |
1799 | i - 1, bp->msix_table[offset + i - 1].vector); |
1800 | } else { |
1801 | offset = CNIC_SUPPORT(bp); |
1802 | netdev_info(dev: bp->dev, |
1803 | format: "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n" , |
1804 | 0, bp->msix_table[offset].vector, |
1805 | i - 1, bp->msix_table[offset + i - 1].vector); |
1806 | } |
1807 | return 0; |
1808 | } |
1809 | |
1810 | int bnx2x_enable_msi(struct bnx2x *bp) |
1811 | { |
1812 | int rc; |
1813 | |
1814 | rc = pci_enable_msi(dev: bp->pdev); |
1815 | if (rc) { |
1816 | BNX2X_DEV_INFO("MSI is not attainable\n" ); |
1817 | return -1; |
1818 | } |
1819 | bp->flags |= USING_MSI_FLAG; |
1820 | |
1821 | return 0; |
1822 | } |
1823 | |
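/* Request a single interrupt line - MSI-X vector 0, the MSI vector or the
 * (shared) legacy INTx line - routed to the common bnx2x_interrupt() handler.
 */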
1824 | static int bnx2x_req_irq(struct bnx2x *bp) |
1825 | { |
1826 | unsigned long flags; |
1827 | unsigned int irq; |
1828 | |
1829 | if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) |
1830 | flags = 0; |
1831 | else |
1832 | flags = IRQF_SHARED; |
1833 | |
1834 | if (bp->flags & USING_MSIX_FLAG) |
1835 | irq = bp->msix_table[0].vector; |
1836 | else |
1837 | irq = bp->pdev->irq; |
1838 | |
1839 | return request_irq(irq, handler: bnx2x_interrupt, flags, name: bp->dev->name, dev: bp->dev); |
1840 | } |
1841 | |
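/* Attach interrupt handlers according to the current interrupt mode:
 * per-queue MSI-X handlers, or a single MSI/MSI-X/INTx handler otherwise.
 */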
1842 | static int bnx2x_setup_irqs(struct bnx2x *bp) |
1843 | { |
1844 | int rc = 0; |
1845 | if (bp->flags & USING_MSIX_FLAG && |
1846 | !(bp->flags & USING_SINGLE_MSIX_FLAG)) { |
1847 | rc = bnx2x_req_msix_irqs(bp); |
1848 | if (rc) |
1849 | return rc; |
1850 | } else { |
1851 | rc = bnx2x_req_irq(bp); |
1852 | if (rc) { |
1853 | BNX2X_ERR("IRQ request failed rc %d, aborting\n" , rc); |
1854 | return rc; |
1855 | } |
1856 | if (bp->flags & USING_MSI_FLAG) { |
1857 | bp->dev->irq = bp->pdev->irq; |
1858 | netdev_info(dev: bp->dev, format: "using MSI IRQ %d\n" , |
1859 | bp->dev->irq); |
1860 | } |
1861 | if (bp->flags & USING_MSIX_FLAG) { |
1862 | bp->dev->irq = bp->msix_table[0].vector; |
1863 | netdev_info(dev: bp->dev, format: "using MSIX IRQ %d\n" , |
1864 | bp->dev->irq); |
1865 | } |
1866 | } |
1867 | |
1868 | return 0; |
1869 | } |
1870 | |
1871 | static void bnx2x_napi_enable_cnic(struct bnx2x *bp) |
1872 | { |
1873 | int i; |
1874 | |
1875 | for_each_rx_queue_cnic(bp, i) { |
1876 | napi_enable(n: &bnx2x_fp(bp, i, napi)); |
1877 | } |
1878 | } |
1879 | |
1880 | static void bnx2x_napi_enable(struct bnx2x *bp) |
1881 | { |
1882 | int i; |
1883 | |
1884 | for_each_eth_queue(bp, i) { |
1885 | napi_enable(n: &bnx2x_fp(bp, i, napi)); |
1886 | } |
1887 | } |
1888 | |
1889 | static void bnx2x_napi_disable_cnic(struct bnx2x *bp) |
1890 | { |
1891 | int i; |
1892 | |
1893 | for_each_rx_queue_cnic(bp, i) { |
1894 | napi_disable(n: &bnx2x_fp(bp, i, napi)); |
1895 | } |
1896 | } |
1897 | |
1898 | static void bnx2x_napi_disable(struct bnx2x *bp) |
1899 | { |
1900 | int i; |
1901 | |
1902 | for_each_eth_queue(bp, i) { |
1903 | napi_disable(n: &bnx2x_fp(bp, i, napi)); |
1904 | } |
1905 | } |
1906 | |
1907 | void bnx2x_netif_start(struct bnx2x *bp) |
1908 | { |
1909 | if (netif_running(dev: bp->dev)) { |
1910 | bnx2x_napi_enable(bp); |
1911 | if (CNIC_LOADED(bp)) |
1912 | bnx2x_napi_enable_cnic(bp); |
1913 | bnx2x_int_enable(bp); |
1914 | if (bp->state == BNX2X_STATE_OPEN) |
1915 | netif_tx_wake_all_queues(dev: bp->dev); |
1916 | } |
1917 | } |
1918 | |
1919 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) |
1920 | { |
1921 | bnx2x_int_disable_sync(bp, disable_hw); |
1922 | bnx2x_napi_disable(bp); |
1923 | if (CNIC_LOADED(bp)) |
1924 | bnx2x_napi_disable_cnic(bp); |
1925 | } |
1926 | |
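/* Select a Tx queue for an skb: FCoE/FIP frames are steered to the
 * dedicated FCoE ring when CNIC is loaded; everything else is hashed
 * over the ETH L2 queues.
 */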
1927 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
1928 | struct net_device *sb_dev) |
1929 | { |
1930 | struct bnx2x *bp = netdev_priv(dev); |
1931 | |
1932 | if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { |
1933 | struct ethhdr *hdr = (struct ethhdr *)skb->data; |
1934 | u16 ether_type = ntohs(hdr->h_proto); |
1935 | |
1936 | /* Skip VLAN tag if present */ |
1937 | if (ether_type == ETH_P_8021Q) { |
1938 | struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); |
1939 | |
1940 | ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); |
1941 | } |
1942 | |
1943 | /* If ethertype is FCoE or FIP - use FCoE ring */ |
1944 | if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) |
1945 | return bnx2x_fcoe_tx(bp, txq_index); |
1946 | } |
1947 | |
1948 | /* select a non-FCoE queue */ |
1949 | return netdev_pick_tx(dev, skb, NULL) % |
1950 | (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); |
1951 | } |
1952 | |
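/* Compute the number of RSS (ETH) queues, add the CNIC/FCoE queue if
 * supported, and update bp->num_queues accordingly.
 */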
1953 | void bnx2x_set_num_queues(struct bnx2x *bp) |
1954 | { |
1955 | /* RSS queues */ |
1956 | bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); |
1957 | |
1958 | /* override in STORAGE SD modes */ |
1959 | if (IS_MF_STORAGE_ONLY(bp)) |
1960 | bp->num_ethernet_queues = 1; |
1961 | |
1962 | /* Add special queues */ |
1963 | bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ |
1964 | bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; |
1965 | |
1966 | BNX2X_DEV_INFO("set number of queues to %d\n" , bp->num_queues); |
1967 | } |
1968 | |
1969 | /** |
1970 | * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues |
1971 | * |
1972 | * @bp: Driver handle |
1973 | * @include_cnic: handle cnic case |
1974 | * |
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of
 * bp->max_cos.
1978 | * |
1979 | * If there is an FCoE L2 queue the appropriate Tx queue will have the next |
1980 | * index after all ETH L2 indices. |
1981 | * |
 * If the actual number of Tx queues (for each CoS) is less than 16 then
 * there will be holes at the end of each group of 16 ETH L2 indices
 * (0..15, 16..31,...) with indices that are not coupled with any real Tx
 * queue.
1985 | * |
1986 | * The proper configuration of skb->queue_mapping is handled by |
1987 | * bnx2x_select_queue() and __skb_tx_hash(). |
1988 | * |
1989 | * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() |
1990 | * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). |
1991 | */ |
1992 | static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) |
1993 | { |
1994 | int rc, tx, rx; |
1995 | |
1996 | tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; |
1997 | rx = BNX2X_NUM_ETH_QUEUES(bp); |
1998 | |
1999 | /* account for fcoe queue */ |
2000 | if (include_cnic && !NO_FCOE(bp)) { |
2001 | rx++; |
2002 | tx++; |
2003 | } |
2004 | |
2005 | rc = netif_set_real_num_tx_queues(dev: bp->dev, txq: tx); |
2006 | if (rc) { |
2007 | BNX2X_ERR("Failed to set real number of Tx queues: %d\n" , rc); |
2008 | return rc; |
2009 | } |
2010 | rc = netif_set_real_num_rx_queues(dev: bp->dev, rxq: rx); |
2011 | if (rc) { |
2012 | BNX2X_ERR("Failed to set real number of Rx queues: %d\n" , rc); |
2013 | return rc; |
2014 | } |
2015 | |
2016 | DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n" , |
2017 | tx, rx); |
2018 | |
2019 | return rc; |
2020 | } |
2021 | |
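/* Compute the Rx buffer size of each queue from the MTU (a mini-jumbo MTU
 * for the FCoE ring) plus FW alignment and header padding, and derive the
 * frag size used by the frag allocator when the buffer fits in one page.
 */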
2022 | static void bnx2x_set_rx_buf_size(struct bnx2x *bp) |
2023 | { |
2024 | int i; |
2025 | |
2026 | for_each_queue(bp, i) { |
2027 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
2028 | u32 mtu; |
2029 | |
2030 | /* Always use a mini-jumbo MTU for the FCoE L2 ring */ |
2031 | if (IS_FCOE_IDX(i)) |
2032 | /* |
2033 | * Although there are no IP frames expected to arrive to |
2034 | * this ring we still want to add an |
2035 | * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer |
2036 | * overrun attack. |
2037 | */ |
2038 | mtu = BNX2X_FCOE_MINI_JUMBO_MTU; |
2039 | else |
2040 | mtu = bp->dev->mtu; |
2041 | fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + |
2042 | IP_HEADER_ALIGNMENT_PADDING + |
2043 | ETH_OVERHEAD + |
2044 | mtu + |
2045 | BNX2X_FW_RX_ALIGN_END; |
2046 | fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); |
2047 | /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ |
2048 | if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) |
2049 | fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; |
2050 | else |
2051 | fp->rx_frag_size = 0; |
2052 | } |
2053 | } |
2054 | |
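/* Fill the default RSS indirection table and apply the ETH RSS
 * configuration to the device.
 */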
static int bnx2x_init_rss(struct bnx2x *bp)
2056 | { |
2057 | int i; |
2058 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); |
2059 | |
2060 | /* Prepare the initial contents for the indirection table if RSS is |
2061 | * enabled |
2062 | */ |
2063 | for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) |
2064 | bp->rss_conf_obj.ind_table[i] = |
2065 | bp->fp->cl_id + |
2066 | ethtool_rxfh_indir_default(index: i, n_rx_rings: num_eth_queues); |
2067 | |
2068 | /* |
2069 | * For 57710 and 57711 SEARCHER configuration (rss_keys) is |
	 * per-port, so if explicit configuration is needed, do it only
2071 | * for a PMF. |
2072 | * |
2073 | * For 57712 and newer on the other hand it's a per-function |
2074 | * configuration. |
2075 | */ |
2076 | return bnx2x_config_rss_eth(bp, config_hash: bp->port.pmf || !CHIP_IS_E1x(bp)); |
2077 | } |
2078 | |
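/**
 * bnx2x_rss - configure RSS for the given RSS object.
 *
 * @bp: driver handle
 * @rss_obj: RSS configuration object to operate on
 * @config_hash: if true, also generate and program the RSS hash key
 * @enable: enable regular RSS mode; otherwise disable RSS
 */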
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2080 | bool config_hash, bool enable) |
2081 | { |
2082 | struct bnx2x_config_rss_params params = {NULL}; |
2083 | |
2084 | /* Although RSS is meaningless when there is a single HW queue we |
2085 | * still need it enabled in order to have HW Rx hash generated. |
2086 | * |
2087 | * if (!is_eth_multi(bp)) |
2088 | * bp->multi_mode = ETH_RSS_MODE_DISABLED; |
2089 | */ |
2090 | |
2091 | params.rss_obj = rss_obj; |
2092 | |
2093 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); |
2094 | |
2095 | if (enable) { |
2096 | __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); |
2097 | |
2098 | /* RSS configuration */ |
2099 | __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); |
2100 | __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); |
2101 | __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); |
2102 | __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); |
2103 | if (rss_obj->udp_rss_v4) |
2104 | __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); |
2105 | if (rss_obj->udp_rss_v6) |
2106 | __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); |
2107 | |
2108 | if (!CHIP_IS_E1x(bp)) { |
2109 | /* valid only for TUNN_MODE_VXLAN tunnel mode */ |
2110 | __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags); |
2111 | __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags); |
2112 | |
2113 | /* valid only for TUNN_MODE_GRE tunnel mode */ |
2114 | __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags); |
2115 | } |
2116 | } else { |
2117 | __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); |
2118 | } |
2119 | |
2120 | /* Hash bits */ |
2121 | params.rss_result_mask = MULTI_MASK; |
2122 | |
2123 | memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); |
2124 | |
2125 | if (config_hash) { |
2126 | /* RSS keys */ |
2127 | netdev_rss_key_fill(buffer: params.rss_key, T_ETH_RSS_KEY * 4); |
2128 | __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); |
2129 | } |
2130 | |
2131 | if (IS_PF(bp)) |
2132 | return bnx2x_config_rss(bp, p: ¶ms); |
2133 | else |
2134 | return bnx2x_vfpf_config_rss(bp, params: ¶ms); |
2135 | } |
2136 | |
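/* Run the HW_INIT command of the function state machine for the given
 * load phase.
 */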
2137 | static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) |
2138 | { |
2139 | struct bnx2x_func_state_params func_params = {NULL}; |
2140 | |
2141 | /* Prepare parameters for function state transitions */ |
2142 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
2143 | |
2144 | func_params.f_obj = &bp->func_obj; |
2145 | func_params.cmd = BNX2X_F_CMD_HW_INIT; |
2146 | |
2147 | func_params.params.hw_init.load_phase = load_code; |
2148 | |
2149 | return bnx2x_func_state_change(bp, params: &func_params); |
2150 | } |
2151 | |
2152 | /* |
 * Cleans the objects that have internal lists without sending
2154 | * ramrods. Should be run when interrupts are disabled. |
2155 | */ |
2156 | void bnx2x_squeeze_objects(struct bnx2x *bp) |
2157 | { |
2158 | int rc; |
2159 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; |
2160 | struct bnx2x_mcast_ramrod_params rparam = {NULL}; |
2161 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; |
2162 | |
2163 | /***************** Cleanup MACs' object first *************************/ |
2164 | |
	/* Wait for completion of requested commands */
2166 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
2167 | /* Perform a dry cleanup */ |
2168 | __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); |
2169 | |
2170 | /* Clean ETH primary MAC */ |
2171 | __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); |
2172 | rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, |
2173 | &ramrod_flags); |
2174 | if (rc != 0) |
2175 | BNX2X_ERR("Failed to clean ETH MACs: %d\n" , rc); |
2176 | |
2177 | /* Cleanup UC list */ |
2178 | vlan_mac_flags = 0; |
2179 | __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); |
2180 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, |
2181 | &ramrod_flags); |
2182 | if (rc != 0) |
2183 | BNX2X_ERR("Failed to clean UC list MACs: %d\n" , rc); |
2184 | |
2185 | /***************** Now clean mcast object *****************************/ |
2186 | rparam.mcast_obj = &bp->mcast_obj; |
2187 | __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); |
2188 | |
2189 | /* Add a DEL command... - Since we're doing a driver cleanup only, |
2190 | * we take a lock surrounding both the initial send and the CONTs, |
2191 | * as we don't want a true completion to disrupt us in the middle. |
2192 | */ |
2193 | netif_addr_lock_bh(dev: bp->dev); |
2194 | rc = bnx2x_config_mcast(bp, p: &rparam, cmd: BNX2X_MCAST_CMD_DEL); |
2195 | if (rc < 0) |
2196 | BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n" , |
2197 | rc); |
2198 | |
2199 | /* ...and wait until all pending commands are cleared */ |
2200 | rc = bnx2x_config_mcast(bp, p: &rparam, cmd: BNX2X_MCAST_CMD_CONT); |
2201 | while (rc != 0) { |
2202 | if (rc < 0) { |
2203 | BNX2X_ERR("Failed to clean multi-cast object: %d\n" , |
2204 | rc); |
2205 | netif_addr_unlock_bh(dev: bp->dev); |
2206 | return; |
2207 | } |
2208 | |
2209 | rc = bnx2x_config_mcast(bp, p: &rparam, cmd: BNX2X_MCAST_CMD_CONT); |
2210 | } |
2211 | netif_addr_unlock_bh(dev: bp->dev); |
2212 | } |
2213 | |
2214 | #ifndef BNX2X_STOP_ON_ERROR |
2215 | #define LOAD_ERROR_EXIT(bp, label) \ |
2216 | do { \ |
2217 | (bp)->state = BNX2X_STATE_ERROR; \ |
2218 | goto label; \ |
2219 | } while (0) |
2220 | |
2221 | #define LOAD_ERROR_EXIT_CNIC(bp, label) \ |
2222 | do { \ |
2223 | bp->cnic_loaded = false; \ |
2224 | goto label; \ |
2225 | } while (0) |
2226 | #else /*BNX2X_STOP_ON_ERROR*/ |
2227 | #define LOAD_ERROR_EXIT(bp, label) \ |
2228 | do { \ |
2229 | (bp)->state = BNX2X_STATE_ERROR; \ |
2230 | (bp)->panic = 1; \ |
2231 | return -EBUSY; \ |
2232 | } while (0) |
2233 | #define LOAD_ERROR_EXIT_CNIC(bp, label) \ |
2234 | do { \ |
2235 | bp->cnic_loaded = false; \ |
2236 | (bp)->panic = 1; \ |
2237 | return -EBUSY; \ |
2238 | } while (0) |
2239 | #endif /*BNX2X_STOP_ON_ERROR*/ |
2240 | |
2241 | static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) |
2242 | { |
2243 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, |
2244 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
2245 | return; |
2246 | } |
2247 | |
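/* Allocate DMA memory for FW statistics: the request area (header plus
 * command groups) followed by the data area (port, PF, FCoE and per-queue
 * stats plus the completion counter), and set the shortcut pointers.
 */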
2248 | static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) |
2249 | { |
2250 | int num_groups, vf_headroom = 0; |
2251 | int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; |
2252 | |
2253 | /* number of queues for statistics is number of eth queues + FCoE */ |
2254 | u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; |
2255 | |
2256 | /* Total number of FW statistics requests = |
2257 | * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper |
2258 | * and fcoe l2 queue) stats + num of queues (which includes another 1 |
2259 | * for fcoe l2 queue if applicable) |
2260 | */ |
2261 | bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; |
2262 | |
2263 | /* vf stats appear in the request list, but their data is allocated by |
2264 | * the VFs themselves. We don't include them in the bp->fw_stats_num as |
2265 | * it is used to determine where to place the vf stats queries in the |
2266 | * request struct |
2267 | */ |
2268 | if (IS_SRIOV(bp)) |
2269 | vf_headroom = bnx2x_vf_headroom(bp); |
2270 | |
2271 | /* Request is built from stats_query_header and an array of |
2272 | * stats_query_cmd_group each of which contains |
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2274 | * configured in the stats_query_header. |
2275 | */ |
2276 | num_groups = |
2277 | (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + |
2278 | (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? |
2279 | 1 : 0)); |
2280 | |
2281 | DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n" , |
2282 | bp->fw_stats_num, vf_headroom, num_groups); |
2283 | bp->fw_stats_req_sz = sizeof(struct stats_query_header) + |
2284 | num_groups * sizeof(struct stats_query_cmd_group); |
2285 | |
2286 | /* Data for statistics requests + stats_counter |
2287 | * stats_counter holds per-STORM counters that are incremented |
2288 | * when STORM has finished with the current request. |
	 * memory for FCoE offloaded statistics is counted anyway,
2290 | * even if they will not be sent. |
2291 | * VF stats are not accounted for here as the data of VF stats is stored |
2292 | * in memory allocated by the VF, not here. |
2293 | */ |
2294 | bp->fw_stats_data_sz = sizeof(struct per_port_stats) + |
2295 | sizeof(struct per_pf_stats) + |
2296 | sizeof(struct fcoe_statistics_params) + |
2297 | sizeof(struct per_queue_stats) * num_queue_stats + |
2298 | sizeof(struct stats_counter); |
2299 | |
2300 | bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, |
2301 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
2302 | if (!bp->fw_stats) |
2303 | goto alloc_mem_err; |
2304 | |
2305 | /* Set shortcuts */ |
2306 | bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; |
2307 | bp->fw_stats_req_mapping = bp->fw_stats_mapping; |
2308 | bp->fw_stats_data = (struct bnx2x_fw_stats_data *) |
2309 | ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); |
2310 | bp->fw_stats_data_mapping = bp->fw_stats_mapping + |
2311 | bp->fw_stats_req_sz; |
2312 | |
2313 | DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n" , |
2314 | U64_HI(bp->fw_stats_req_mapping), |
2315 | U64_LO(bp->fw_stats_req_mapping)); |
2316 | DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n" , |
2317 | U64_HI(bp->fw_stats_data_mapping), |
2318 | U64_LO(bp->fw_stats_data_mapping)); |
2319 | return 0; |
2320 | |
2321 | alloc_mem_err: |
2322 | bnx2x_free_fw_stats_mem(bp); |
2323 | BNX2X_ERR("Can't allocate FW stats memory\n" ); |
2324 | return -ENOMEM; |
2325 | } |
2326 | |
2327 | /* send load request to mcp and analyze response */ |
2328 | static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) |
2329 | { |
2330 | u32 param; |
2331 | |
2332 | /* init fw_seq */ |
2333 | bp->fw_seq = |
2334 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & |
2335 | DRV_MSG_SEQ_NUMBER_MASK); |
2336 | BNX2X_DEV_INFO("fw_seq 0x%08x\n" , bp->fw_seq); |
2337 | |
2338 | /* Get current FW pulse sequence */ |
2339 | bp->fw_drv_pulse_wr_seq = |
2340 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & |
2341 | DRV_PULSE_SEQ_MASK); |
2342 | BNX2X_DEV_INFO("drv_pulse 0x%x\n" , bp->fw_drv_pulse_wr_seq); |
2343 | |
2344 | param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; |
2345 | |
2346 | if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) |
2347 | param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; |
2348 | |
2349 | /* load request */ |
2350 | (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); |
2351 | |
2352 | /* if mcp fails to respond we must abort */ |
2353 | if (!(*load_code)) { |
2354 | BNX2X_ERR("MCP response failure, aborting\n" ); |
2355 | return -EBUSY; |
2356 | } |
2357 | |
2358 | /* If mcp refused (e.g. other port is in diagnostic mode) we |
2359 | * must abort |
2360 | */ |
2361 | if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { |
2362 | BNX2X_ERR("MCP refused load request, aborting\n" ); |
2363 | return -EBUSY; |
2364 | } |
2365 | return 0; |
2366 | } |
2367 | |
2368 | /* check whether another PF has already loaded FW to chip. In |
2369 | * virtualized environments a pf from another VM may have already |
2370 | * initialized the device including loading FW |
2371 | */ |
2372 | int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) |
2373 | { |
2374 | /* is another pf loaded on this engine? */ |
2375 | if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && |
2376 | load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { |
2377 | u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; |
2378 | u32 loaded_fw; |
2379 | |
2380 | /* read loaded FW from chip */ |
2381 | loaded_fw = REG_RD(bp, XSEM_REG_PRAM); |
2382 | |
2383 | loaded_fw_major = loaded_fw & 0xff; |
2384 | loaded_fw_minor = (loaded_fw >> 8) & 0xff; |
2385 | loaded_fw_rev = (loaded_fw >> 16) & 0xff; |
2386 | loaded_fw_eng = (loaded_fw >> 24) & 0xff; |
2387 | |
2388 | DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n" , |
2389 | loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); |
2390 | |
2391 | /* abort nic load if version mismatch */ |
2392 | if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || |
2393 | loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || |
2394 | loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || |
2395 | loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { |
2396 | if (print_err) |
2397 | BNX2X_ERR("loaded FW incompatible. Aborting\n" ); |
2398 | else |
2399 | BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n" ); |
2400 | |
2401 | return -EBUSY; |
2402 | } |
2403 | } |
2404 | return 0; |
2405 | } |
2406 | |
2407 | /* returns the "mcp load_code" according to global load_count array */ |
2408 | static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) |
2409 | { |
2410 | int path = BP_PATH(bp); |
2411 | |
2412 | DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n" , |
2413 | path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], |
2414 | bnx2x_load_count[path][2]); |
2415 | bnx2x_load_count[path][0]++; |
2416 | bnx2x_load_count[path][1 + port]++; |
2417 | DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n" , |
2418 | path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], |
2419 | bnx2x_load_count[path][2]); |
2420 | if (bnx2x_load_count[path][0] == 1) |
2421 | return FW_MSG_CODE_DRV_LOAD_COMMON; |
2422 | else if (bnx2x_load_count[path][1 + port] == 1) |
2423 | return FW_MSG_CODE_DRV_LOAD_PORT; |
2424 | else |
2425 | return FW_MSG_CODE_DRV_LOAD_FUNCTION; |
2426 | } |
2427 | |
2428 | /* mark PMF if applicable */ |
2429 | static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) |
2430 | { |
2431 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
2432 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || |
2433 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { |
2434 | bp->port.pmf = 1; |
2435 | /* We need the barrier to ensure the ordering between the |
2436 | * writing to bp->port.pmf here and reading it from the |
2437 | * bnx2x_periodic_task(). |
2438 | */ |
2439 | smp_mb(); |
2440 | } else { |
2441 | bp->port.pmf = 0; |
2442 | } |
2443 | |
2444 | DP(NETIF_MSG_LINK, "pmf %d\n" , bp->port.pmf); |
2445 | } |
2446 | |
2447 | static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) |
2448 | { |
2449 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
2450 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && |
2451 | (bp->common.shmem2_base)) { |
2452 | if (SHMEM2_HAS(bp, dcc_support)) |
2453 | SHMEM2_WR(bp, dcc_support, |
2454 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | |
2455 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); |
2456 | if (SHMEM2_HAS(bp, afex_driver_support)) |
2457 | SHMEM2_WR(bp, afex_driver_support, |
2458 | SHMEM_AFEX_SUPPORTED_VERSION_ONE); |
2459 | } |
2460 | |
2461 | /* Set AFEX default VLAN tag to an invalid value */ |
2462 | bp->afex_def_vlan_tag = -1; |
2463 | } |
2464 | |
2465 | /** |
2466 | * bnx2x_bz_fp - zero content of the fastpath structure. |
2467 | * |
2468 | * @bp: driver handle |
2469 | * @index: fastpath index to be zeroed |
2470 | * |
2471 | * Makes sure the contents of the bp->fp[index].napi is kept |
2472 | * intact. |
2473 | */ |
2474 | static void bnx2x_bz_fp(struct bnx2x *bp, int index) |
2475 | { |
2476 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
2477 | int cos; |
2478 | struct napi_struct orig_napi = fp->napi; |
2479 | struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; |
2480 | |
2481 | /* bzero bnx2x_fastpath contents */ |
2482 | if (fp->tpa_info) |
2483 | memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * |
2484 | sizeof(struct bnx2x_agg_info)); |
2485 | memset(fp, 0, sizeof(*fp)); |
2486 | |
2487 | /* Restore the NAPI object as it has been already initialized */ |
2488 | fp->napi = orig_napi; |
2489 | fp->tpa_info = orig_tpa_info; |
2490 | fp->bp = bp; |
2491 | fp->index = index; |
2492 | if (IS_ETH_FP(fp)) |
2493 | fp->max_cos = bp->max_cos; |
2494 | else |
2495 | /* Special queues support only one CoS */ |
2496 | fp->max_cos = 1; |
2497 | |
2498 | /* Init txdata pointers */ |
2499 | if (IS_FCOE_FP(fp)) |
2500 | fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; |
2501 | if (IS_ETH_FP(fp)) |
2502 | for_each_cos_in_tx_queue(fp, cos) |
2503 | fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * |
2504 | BNX2X_NUM_ETH_QUEUES(bp) + index]; |
2505 | |
2506 | /* set the tpa flag for each queue. The tpa flag determines the queue |
2507 | * minimal size so it must be set prior to queue memory allocation |
2508 | */ |
2509 | if (bp->dev->features & NETIF_F_LRO) |
2510 | fp->mode = TPA_MODE_LRO; |
2511 | else if (bp->dev->features & NETIF_F_GRO_HW) |
2512 | fp->mode = TPA_MODE_GRO; |
2513 | else |
2514 | fp->mode = TPA_MODE_DISABLED; |
2515 | |
2516 | /* We don't want TPA if it's disabled in bp |
2517 | * or if this is an FCoE L2 ring. |
2518 | */ |
2519 | if (bp->disable_tpa || IS_FCOE_FP(fp)) |
2520 | fp->mode = TPA_MODE_DISABLED; |
2521 | } |
2522 | |
2523 | void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) |
2524 | { |
2525 | u32 cur; |
2526 | |
2527 | if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) |
2528 | return; |
2529 | |
2530 | cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); |
2531 | DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n" , |
2532 | cur, state); |
2533 | |
2534 | SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); |
2535 | } |
2536 | |
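/* Load the CNIC-related part of the driver: allocate CNIC memory and
 * fastpath resources, enable CNIC NAPI, init the HW function for CNIC,
 * set up the CNIC queues (PF only) and notify the CNIC driver.
 */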
2537 | int bnx2x_load_cnic(struct bnx2x *bp) |
2538 | { |
2539 | int i, rc, port = BP_PORT(bp); |
2540 | |
2541 | DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n" ); |
2542 | |
2543 | mutex_init(&bp->cnic_mutex); |
2544 | |
2545 | if (IS_PF(bp)) { |
2546 | rc = bnx2x_alloc_mem_cnic(bp); |
2547 | if (rc) { |
2548 | BNX2X_ERR("Unable to allocate bp memory for cnic\n" ); |
2549 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); |
2550 | } |
2551 | } |
2552 | |
2553 | rc = bnx2x_alloc_fp_mem_cnic(bp); |
2554 | if (rc) { |
2555 | BNX2X_ERR("Unable to allocate memory for cnic fps\n" ); |
2556 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); |
2557 | } |
2558 | |
2559 | /* Update the number of queues with the cnic queues */ |
2560 | rc = bnx2x_set_real_num_queues(bp, include_cnic: 1); |
2561 | if (rc) { |
2562 | BNX2X_ERR("Unable to set real_num_queues including cnic\n" ); |
2563 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); |
2564 | } |
2565 | |
2566 | /* Add all CNIC NAPI objects */ |
2567 | bnx2x_add_all_napi_cnic(bp); |
2568 | DP(NETIF_MSG_IFUP, "cnic napi added\n" ); |
2569 | bnx2x_napi_enable_cnic(bp); |
2570 | |
2571 | rc = bnx2x_init_hw_func_cnic(bp); |
2572 | if (rc) |
2573 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); |
2574 | |
2575 | bnx2x_nic_init_cnic(bp); |
2576 | |
2577 | if (IS_PF(bp)) { |
2578 | /* Enable Timer scan */ |
2579 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); |
2580 | |
2581 | /* setup cnic queues */ |
2582 | for_each_cnic_queue(bp, i) { |
2583 | rc = bnx2x_setup_queue(bp, fp: &bp->fp[i], leading: 0); |
2584 | if (rc) { |
2585 | BNX2X_ERR("Queue setup failed\n" ); |
2586 | LOAD_ERROR_EXIT(bp, load_error_cnic2); |
2587 | } |
2588 | } |
2589 | } |
2590 | |
2591 | /* Initialize Rx filter. */ |
2592 | bnx2x_set_rx_mode_inner(bp); |
2593 | |
2594 | /* re-read iscsi info */ |
2595 | bnx2x_get_iscsi_info(bp); |
2596 | bnx2x_setup_cnic_irq_info(bp); |
2597 | bnx2x_setup_cnic_info(bp); |
2598 | bp->cnic_loaded = true; |
2599 | if (bp->state == BNX2X_STATE_OPEN) |
2600 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); |
2601 | |
2602 | DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n" ); |
2603 | |
2604 | return 0; |
2605 | |
2606 | #ifndef BNX2X_STOP_ON_ERROR |
2607 | load_error_cnic2: |
2608 | /* Disable Timer scan */ |
2609 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); |
2610 | |
2611 | load_error_cnic1: |
2612 | bnx2x_napi_disable_cnic(bp); |
2613 | /* Update the number of queues without the cnic queues */ |
2614 | if (bnx2x_set_real_num_queues(bp, include_cnic: 0)) |
2615 | BNX2X_ERR("Unable to set real_num_queues not including cnic\n" ); |
2616 | load_error_cnic0: |
2617 | BNX2X_ERR("CNIC-related load failed\n" ); |
2618 | bnx2x_free_fp_mem_cnic(bp); |
2619 | bnx2x_free_mem_cnic(bp); |
2620 | return rc; |
2621 | #endif /* ! BNX2X_STOP_ON_ERROR */ |
2622 | } |
2623 | |
2624 | /* must be called with rtnl_lock */ |
2625 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
2626 | { |
2627 | int port = BP_PORT(bp); |
2628 | int i, rc = 0, load_code = 0; |
2629 | |
2630 | DP(NETIF_MSG_IFUP, "Starting NIC load\n" ); |
2631 | DP(NETIF_MSG_IFUP, |
2632 | "CNIC is %s\n" , CNIC_ENABLED(bp) ? "enabled" : "disabled" ); |
2633 | |
2634 | #ifdef BNX2X_STOP_ON_ERROR |
2635 | if (unlikely(bp->panic)) { |
2636 | BNX2X_ERR("Can't load NIC when there is panic\n" ); |
2637 | return -EPERM; |
2638 | } |
2639 | #endif |
2640 | |
2641 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
2642 | |
2643 | /* zero the structure w/o any lock, before SP handler is initialized */ |
2644 | memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); |
2645 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
2646 | &bp->last_reported_link.link_report_flags); |
2647 | |
2648 | if (IS_PF(bp)) |
2649 | /* must be called before memory allocation and HW init */ |
2650 | bnx2x_ilt_set_info(bp); |
2651 | |
2652 | /* |
2653 | * Zero fastpath structures preserving invariants like napi, which are |
2654 | * allocated only once, fp index, max_cos, bp pointer. |
2655 | * Also set fp->mode and txdata_ptr. |
2656 | */ |
2657 | DP(NETIF_MSG_IFUP, "num queues: %d" , bp->num_queues); |
2658 | for_each_queue(bp, i) |
2659 | bnx2x_bz_fp(bp, index: i); |
2660 | memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + |
2661 | bp->num_cnic_queues) * |
2662 | sizeof(struct bnx2x_fp_txdata)); |
2663 | |
2664 | bp->fcoe_init = false; |
2665 | |
2666 | /* Set the receive queues buffer size */ |
2667 | bnx2x_set_rx_buf_size(bp); |
2668 | |
2669 | if (IS_PF(bp)) { |
2670 | rc = bnx2x_alloc_mem(bp); |
2671 | if (rc) { |
2672 | BNX2X_ERR("Unable to allocate bp memory\n" ); |
2673 | return rc; |
2674 | } |
2675 | } |
2676 | |
	/* Needs to be done after alloc mem, since it's self adjusting to the
	 * amount of memory available for RSS queues
2679 | */ |
2680 | rc = bnx2x_alloc_fp_mem(bp); |
2681 | if (rc) { |
2682 | BNX2X_ERR("Unable to allocate memory for fps\n" ); |
2683 | LOAD_ERROR_EXIT(bp, load_error0); |
2684 | } |
2685 | |
	/* Allocate memory for FW statistics */
2687 | rc = bnx2x_alloc_fw_stats_mem(bp); |
2688 | if (rc) |
2689 | LOAD_ERROR_EXIT(bp, load_error0); |
2690 | |
2691 | /* request pf to initialize status blocks */ |
2692 | if (IS_VF(bp)) { |
2693 | rc = bnx2x_vfpf_init(bp); |
2694 | if (rc) |
2695 | LOAD_ERROR_EXIT(bp, load_error0); |
2696 | } |
2697 | |
2698 | /* As long as bnx2x_alloc_mem() may possibly update |
2699 | * bp->num_queues, bnx2x_set_real_num_queues() should always |
2700 | * come after it. At this stage cnic queues are not counted. |
2701 | */ |
2702 | rc = bnx2x_set_real_num_queues(bp, include_cnic: 0); |
2703 | if (rc) { |
2704 | BNX2X_ERR("Unable to set real_num_queues\n" ); |
2705 | LOAD_ERROR_EXIT(bp, load_error0); |
2706 | } |
2707 | |
2708 | /* configure multi cos mappings in kernel. |
2709 | * this configuration may be overridden by a multi class queue |
2710 | * discipline or by a dcbx negotiation result. |
2711 | */ |
2712 | bnx2x_setup_tc(dev: bp->dev, num_tc: bp->max_cos); |
2713 | |
2714 | /* Add all NAPI objects */ |
2715 | bnx2x_add_all_napi(bp); |
2716 | DP(NETIF_MSG_IFUP, "napi added\n" ); |
2717 | bnx2x_napi_enable(bp); |
2718 | bp->nic_stopped = false; |
2719 | |
2720 | if (IS_PF(bp)) { |
2721 | /* set pf load just before approaching the MCP */ |
2722 | bnx2x_set_pf_load(bp); |
2723 | |
2724 | /* if mcp exists send load request and analyze response */ |
2725 | if (!BP_NOMCP(bp)) { |
2726 | /* attempt to load pf */ |
2727 | rc = bnx2x_nic_load_request(bp, load_code: &load_code); |
2728 | if (rc) |
2729 | LOAD_ERROR_EXIT(bp, load_error1); |
2730 | |
2731 | /* what did mcp say? */ |
2732 | rc = bnx2x_compare_fw_ver(bp, load_code, print_err: true); |
2733 | if (rc) { |
2734 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, param: 0); |
2735 | LOAD_ERROR_EXIT(bp, load_error2); |
2736 | } |
2737 | } else { |
2738 | load_code = bnx2x_nic_load_no_mcp(bp, port); |
2739 | } |
2740 | |
2741 | /* mark pmf if applicable */ |
2742 | bnx2x_nic_load_pmf(bp, load_code); |
2743 | |
2744 | /* Init Function state controlling object */ |
2745 | bnx2x__init_func_obj(bp); |
2746 | |
2747 | /* Initialize HW */ |
2748 | rc = bnx2x_init_hw(bp, load_code); |
2749 | if (rc) { |
2750 | BNX2X_ERR("HW init failed, aborting\n" ); |
2751 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, param: 0); |
2752 | LOAD_ERROR_EXIT(bp, load_error2); |
2753 | } |
2754 | } |
2755 | |
2756 | bnx2x_pre_irq_nic_init(bp); |
2757 | |
2758 | /* Connect to IRQs */ |
2759 | rc = bnx2x_setup_irqs(bp); |
2760 | if (rc) { |
2761 | BNX2X_ERR("setup irqs failed\n" ); |
2762 | if (IS_PF(bp)) |
2763 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, param: 0); |
2764 | LOAD_ERROR_EXIT(bp, load_error2); |
2765 | } |
2766 | |
2767 | /* Init per-function objects */ |
2768 | if (IS_PF(bp)) { |
2769 | /* Setup NIC internals and enable interrupts */ |
2770 | bnx2x_post_irq_nic_init(bp, load_code); |
2771 | |
2772 | bnx2x_init_bp_objs(bp); |
2773 | bnx2x_iov_nic_init(bp); |
2774 | |
2775 | /* Set AFEX default VLAN tag to an invalid value */ |
2776 | bp->afex_def_vlan_tag = -1; |
2777 | bnx2x_nic_load_afex_dcc(bp, load_code); |
2778 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; |
2779 | rc = bnx2x_func_start(bp); |
2780 | if (rc) { |
2781 | BNX2X_ERR("Function start failed!\n" ); |
2782 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, param: 0); |
2783 | |
2784 | LOAD_ERROR_EXIT(bp, load_error3); |
2785 | } |
2786 | |
2787 | /* Send LOAD_DONE command to MCP */ |
2788 | if (!BP_NOMCP(bp)) { |
2789 | load_code = bnx2x_fw_command(bp, |
2790 | DRV_MSG_CODE_LOAD_DONE, param: 0); |
2791 | if (!load_code) { |
2792 | BNX2X_ERR("MCP response failure, aborting\n" ); |
2793 | rc = -EBUSY; |
2794 | LOAD_ERROR_EXIT(bp, load_error3); |
2795 | } |
2796 | } |
2797 | |
2798 | /* initialize FW coalescing state machines in RAM */ |
2799 | bnx2x_update_coalesce(bp); |
2800 | } |
2801 | |
2802 | /* setup the leading queue */ |
2803 | rc = bnx2x_setup_leading(bp); |
2804 | if (rc) { |
2805 | BNX2X_ERR("Setup leading failed!\n" ); |
2806 | LOAD_ERROR_EXIT(bp, load_error3); |
2807 | } |
2808 | |
2809 | /* set up the rest of the queues */ |
2810 | for_each_nondefault_eth_queue(bp, i) { |
2811 | if (IS_PF(bp)) |
2812 | rc = bnx2x_setup_queue(bp, fp: &bp->fp[i], leading: false); |
2813 | else /* VF */ |
2814 | rc = bnx2x_vfpf_setup_q(bp, fp: &bp->fp[i], is_leading: false); |
2815 | if (rc) { |
2816 | BNX2X_ERR("Queue %d setup failed\n" , i); |
2817 | LOAD_ERROR_EXIT(bp, load_error3); |
2818 | } |
2819 | } |
2820 | |
2821 | /* setup rss */ |
2822 | rc = bnx2x_init_rss(bp); |
2823 | if (rc) { |
2824 | BNX2X_ERR("PF RSS init failed\n" ); |
2825 | LOAD_ERROR_EXIT(bp, load_error3); |
2826 | } |
2827 | |
2828 | /* Now when Clients are configured we are ready to work */ |
2829 | bp->state = BNX2X_STATE_OPEN; |
2830 | |
2831 | /* Configure a ucast MAC */ |
2832 | if (IS_PF(bp)) |
2833 | rc = bnx2x_set_eth_mac(bp, set: true); |
2834 | else /* vf */ |
2835 | rc = bnx2x_vfpf_config_mac(bp, addr: bp->dev->dev_addr, vf_qid: bp->fp->index, |
2836 | set: true); |
2837 | if (rc) { |
2838 | BNX2X_ERR("Setting Ethernet MAC failed\n" ); |
2839 | LOAD_ERROR_EXIT(bp, load_error3); |
2840 | } |
2841 | |
2842 | if (IS_PF(bp) && bp->pending_max) { |
2843 | bnx2x_update_max_mf_config(bp, value: bp->pending_max); |
2844 | bp->pending_max = 0; |
2845 | } |
2846 | |
2847 | bp->force_link_down = false; |
2848 | if (bp->port.pmf) { |
2849 | rc = bnx2x_initial_phy_init(bp, load_mode); |
2850 | if (rc) |
2851 | LOAD_ERROR_EXIT(bp, load_error3); |
2852 | } |
2853 | bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; |
2854 | |
2855 | /* Start fast path */ |
2856 | |
2857 | /* Re-configure vlan filters */ |
2858 | rc = bnx2x_vlan_reconfigure_vid(bp); |
2859 | if (rc) |
2860 | LOAD_ERROR_EXIT(bp, load_error3); |
2861 | |
2862 | /* Initialize Rx filter. */ |
2863 | bnx2x_set_rx_mode_inner(bp); |
2864 | |
2865 | if (bp->flags & PTP_SUPPORTED) { |
2866 | bnx2x_register_phc(bp); |
2867 | bnx2x_init_ptp(bp); |
2868 | bnx2x_configure_ptp_filters(bp); |
2869 | } |
2870 | /* Start Tx */ |
2871 | switch (load_mode) { |
2872 | case LOAD_NORMAL: |
2873 | /* Tx queue should be only re-enabled */ |
2874 | netif_tx_wake_all_queues(dev: bp->dev); |
2875 | break; |
2876 | |
2877 | case LOAD_OPEN: |
2878 | netif_tx_start_all_queues(dev: bp->dev); |
2879 | smp_mb__after_atomic(); |
2880 | break; |
2881 | |
2882 | case LOAD_DIAG: |
2883 | case LOAD_LOOPBACK_EXT: |
2884 | bp->state = BNX2X_STATE_DIAG; |
2885 | break; |
2886 | |
2887 | default: |
2888 | break; |
2889 | } |
2890 | |
2891 | if (bp->port.pmf) |
2892 | bnx2x_update_drv_flags(bp, flags: 1 << DRV_FLAGS_PORT_MASK, set: 0); |
2893 | else |
2894 | bnx2x__link_status_update(bp); |
2895 | |
2896 | /* start the timer */ |
2897 | mod_timer(timer: &bp->timer, expires: jiffies + bp->current_interval); |
2898 | |
2899 | if (CNIC_ENABLED(bp)) |
2900 | bnx2x_load_cnic(bp); |
2901 | |
2902 | if (IS_PF(bp)) |
2903 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, verbose: 0); |
2904 | |
2905 | if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { |
2906 | /* mark driver is loaded in shmem2 */ |
2907 | u32 val; |
2908 | val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); |
2909 | val &= ~DRV_FLAGS_MTU_MASK; |
2910 | val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); |
2911 | SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], |
2912 | val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | |
2913 | DRV_FLAGS_CAPABILITIES_LOADED_L2); |
2914 | } |
2915 | |
2916 | /* Wait for all pending SP commands to complete */ |
2917 | if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, mask: ~0x0UL)) { |
2918 | BNX2X_ERR("Timeout waiting for SP elements to complete\n" ); |
2919 | bnx2x_nic_unload(bp, UNLOAD_CLOSE, keep_link: false); |
2920 | return -EBUSY; |
2921 | } |
2922 | |
2923 | /* Update driver data for On-Chip MFW dump. */ |
2924 | if (IS_PF(bp)) |
2925 | bnx2x_update_mfw_dump(bp); |
2926 | |
2927 | /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ |
2928 | if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) |
2929 | bnx2x_dcbx_init(bp, update_shmem: false); |
2930 | |
2931 | if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) |
2932 | bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); |
2933 | |
2934 | DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n" ); |
2935 | |
2936 | return 0; |
2937 | |
2938 | #ifndef BNX2X_STOP_ON_ERROR |
2939 | load_error3: |
2940 | if (IS_PF(bp)) { |
2941 | bnx2x_int_disable_sync(bp, disable_hw: 1); |
2942 | |
2943 | /* Clean queueable objects */ |
2944 | bnx2x_squeeze_objects(bp); |
2945 | } |
2946 | |
2947 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
2948 | bnx2x_free_skbs(bp); |
2949 | for_each_rx_queue(bp, i) |
2950 | bnx2x_free_rx_sge_range(bp, fp: bp->fp + i, NUM_RX_SGE); |
2951 | |
2952 | /* Release IRQs */ |
2953 | bnx2x_free_irq(bp); |
2954 | load_error2: |
2955 | if (IS_PF(bp) && !BP_NOMCP(bp)) { |
2956 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, param: 0); |
2957 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, param: 0); |
2958 | } |
2959 | |
2960 | bp->port.pmf = 0; |
2961 | load_error1: |
2962 | bnx2x_napi_disable(bp); |
2963 | bnx2x_del_all_napi(bp); |
2964 | bp->nic_stopped = true; |
2965 | |
2966 | /* clear pf_load status, as it was already set */ |
2967 | if (IS_PF(bp)) |
2968 | bnx2x_clear_pf_load(bp); |
2969 | load_error0: |
2970 | bnx2x_free_fw_stats_mem(bp); |
2971 | bnx2x_free_fp_mem(bp); |
2972 | bnx2x_free_mem(bp); |
2973 | |
2974 | return rc; |
2975 | #endif /* ! BNX2X_STOP_ON_ERROR */ |
2976 | } |
2977 | |
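/* Wait until all Tx fastpath queues (all CoS instances) have completed
 * their pending transmissions.
 */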
2978 | int bnx2x_drain_tx_queues(struct bnx2x *bp) |
2979 | { |
2980 | u8 rc = 0, cos, i; |
2981 | |
2982 | /* Wait until tx fastpath tasks complete */ |
2983 | for_each_tx_queue(bp, i) { |
2984 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
2985 | |
2986 | for_each_cos_in_tx_queue(fp, cos) |
2987 | rc = bnx2x_clean_tx_queue(bp, txdata: fp->txdata_ptr[cos]); |
2988 | if (rc) |
2989 | return rc; |
2990 | } |
2991 | return 0; |
2992 | } |
2993 | |
2994 | /* must be called with rtnl_lock */ |
2995 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) |
2996 | { |
2997 | int i; |
2998 | bool global = false; |
2999 | |
3000 | DP(NETIF_MSG_IFUP, "Starting NIC unload\n" ); |
3001 | |
3002 | if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) |
3003 | bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); |
3004 | |
3005 | /* mark driver is unloaded in shmem2 */ |
3006 | if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { |
3007 | u32 val; |
3008 | val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); |
3009 | SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], |
3010 | val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); |
3011 | } |
3012 | |
3013 | if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && |
3014 | (bp->state == BNX2X_STATE_CLOSED || |
3015 | bp->state == BNX2X_STATE_ERROR)) { |
3016 | /* We can get here if the driver has been unloaded |
3017 | * during parity error recovery and is either waiting for a |
3018 | * leader to complete or for other functions to unload and |
3019 | * then ifdown has been issued. In this case we want to |
		 * unload and let other functions complete the recovery
		 * process.
3022 | */ |
3023 | bp->recovery_state = BNX2X_RECOVERY_DONE; |
3024 | bp->is_leader = 0; |
3025 | bnx2x_release_leader_lock(bp); |
3026 | smp_mb(); |
3027 | |
3028 | DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n" ); |
3029 | BNX2X_ERR("Can't unload in closed or error state\n" ); |
3030 | return -EINVAL; |
3031 | } |
3032 | |
	/* Nothing to do during unload if a previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * We can get here only after an unsuccessful ndo_* callback, during which
3037 | * dev->IFF_UP flag is still on. |
3038 | */ |
3039 | if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) |
3040 | return 0; |
3041 | |
3042 | /* It's important to set the bp->state to the value different from |
3043 | * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() |
3044 | * may restart the Tx from the NAPI context (see bnx2x_tx_int()). |
3045 | */ |
3046 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
3047 | smp_mb(); |
3048 | |
3049 | /* indicate to VFs that the PF is going down */ |
3050 | bnx2x_iov_channel_down(bp); |
3051 | |
3052 | if (CNIC_LOADED(bp)) |
3053 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); |
3054 | |
3055 | /* Stop Tx */ |
3056 | bnx2x_tx_disable(bp); |
3057 | netdev_reset_tc(dev: bp->dev); |
3058 | |
3059 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
3060 | |
3061 | del_timer_sync(timer: &bp->timer); |
3062 | |
3063 | if (IS_PF(bp) && !BP_NOMCP(bp)) { |
3064 | /* Set ALWAYS_ALIVE bit in shmem */ |
3065 | bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; |
3066 | bnx2x_drv_pulse(bp); |
3067 | bnx2x_stats_handle(bp, event: STATS_EVENT_STOP); |
3068 | bnx2x_save_statistics(bp); |
3069 | } |
3070 | |
3071 | /* wait till consumers catch up with producers in all queues. |
3072 | * If we're recovering, FW can't write to host so no reason |
3073 | * to wait for the queues to complete all Tx. |
3074 | */ |
3075 | if (unload_mode != UNLOAD_RECOVERY) |
3076 | bnx2x_drain_tx_queues(bp); |
3077 | |
	/* If VF, indicate to the PF that this function is going down (the PF
	 * will delete sp elements and clear initializations)
3080 | */ |
3081 | if (IS_VF(bp)) { |
3082 | bnx2x_clear_vlan_info(bp); |
3083 | bnx2x_vfpf_close_vf(bp); |
3084 | } else if (unload_mode != UNLOAD_RECOVERY) { |
3085 | /* if this is a normal/close unload need to clean up chip*/ |
3086 | bnx2x_chip_cleanup(bp, unload_mode, keep_link); |
3087 | } else { |
3088 | /* Send the UNLOAD_REQUEST to the MCP */ |
3089 | bnx2x_send_unload_req(bp, unload_mode); |
3090 | |
3091 | /* Prevent transactions to host from the functions on the |
3092 | * engine that doesn't reset global blocks in case of global |
3093 | * attention once global blocks are reset and gates are opened |
3094 | * (the engine which leader will perform the recovery |
3095 | * last). |
3096 | */ |
3097 | if (!CHIP_IS_E1x(bp)) |
3098 | bnx2x_pf_disable(bp); |
3099 | |
3100 | if (!bp->nic_stopped) { |
3101 | /* Disable HW interrupts, NAPI */ |
3102 | bnx2x_netif_stop(bp, disable_hw: 1); |
3103 | /* Delete all NAPI objects */ |
3104 | bnx2x_del_all_napi(bp); |
3105 | if (CNIC_LOADED(bp)) |
3106 | bnx2x_del_all_napi_cnic(bp); |
3107 | /* Release IRQs */ |
3108 | bnx2x_free_irq(bp); |
3109 | bp->nic_stopped = true; |
3110 | } |
3111 | |
3112 | /* Report UNLOAD_DONE to MCP */ |
3113 | bnx2x_send_unload_done(bp, keep_link: false); |
3114 | } |
3115 | |
3116 | /* |
3117 | * At this stage no more interrupts will arrive so we may safely clean |
3118 | * the queueable objects here in case they failed to get cleaned so far. |
3119 | */ |
3120 | if (IS_PF(bp)) |
3121 | bnx2x_squeeze_objects(bp); |
3122 | |
3123 | /* There should be no more pending SP commands at this stage */ |
3124 | bp->sp_state = 0; |
3125 | |
3126 | bp->port.pmf = 0; |
3127 | |
3128 | /* clear pending work in rtnl task */ |
3129 | bp->sp_rtnl_state = 0; |
3130 | smp_mb(); |
3131 | |
3132 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
3133 | bnx2x_free_skbs(bp); |
3134 | if (CNIC_LOADED(bp)) |
3135 | bnx2x_free_skbs_cnic(bp); |
3136 | for_each_rx_queue(bp, i) |
3137 | bnx2x_free_rx_sge_range(bp, fp: bp->fp + i, NUM_RX_SGE); |
3138 | |
3139 | bnx2x_free_fp_mem(bp); |
3140 | if (CNIC_LOADED(bp)) |
3141 | bnx2x_free_fp_mem_cnic(bp); |
3142 | |
3143 | if (IS_PF(bp)) { |
3144 | if (CNIC_LOADED(bp)) |
3145 | bnx2x_free_mem_cnic(bp); |
3146 | } |
3147 | bnx2x_free_mem(bp); |
3148 | |
3149 | bp->state = BNX2X_STATE_CLOSED; |
3150 | bp->cnic_loaded = false; |
3151 | |
3152 | /* Clear driver version indication in shmem */ |
3153 | if (IS_PF(bp) && !BP_NOMCP(bp)) |
3154 | bnx2x_update_mng_version(bp); |
3155 | |
3156 | /* Check if there are pending parity attentions. If there are - set |
3157 | * RECOVERY_IN_PROGRESS. |
3158 | */ |
3159 | if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, global: &global, print: false)) { |
3160 | bnx2x_set_reset_in_progress(bp); |
3161 | |
3162 | /* Set RESET_IS_GLOBAL if needed */ |
3163 | if (global) |
3164 | bnx2x_set_reset_global(bp); |
3165 | } |
3166 | |
3167 | /* The last driver must disable a "close the gate" if there is no |
3168 | * parity attention or "process kill" pending. |
3169 | */ |
3170 | if (IS_PF(bp) && |
3171 | !bnx2x_clear_pf_load(bp) && |
3172 | bnx2x_reset_is_done(bp, BP_PATH(bp))) |
3173 | bnx2x_disable_close_the_gate(bp); |
3174 | |
3175 | DP(NETIF_MSG_IFUP, "Ending NIC unload\n" ); |
3176 | |
3177 | return 0; |
3178 | } |
3179 | |
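/* Move the device between D0 and D3hot via the PCI PM control register.
 * For D3hot, the transition is skipped while other clients still hold the
 * device or on emulation/FPGA platforms, and PME is enabled when WoL is set.
 */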
3180 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) |
3181 | { |
3182 | u16 pmcsr; |
3183 | |
3184 | /* If there is no power capability, silently succeed */ |
3185 | if (!bp->pdev->pm_cap) { |
3186 | BNX2X_DEV_INFO("No power capability. Breaking.\n" ); |
3187 | return 0; |
3188 | } |
3189 | |
3190 | pci_read_config_word(dev: bp->pdev, where: bp->pdev->pm_cap + PCI_PM_CTRL, val: &pmcsr); |
3191 | |
3192 | switch (state) { |
3193 | case PCI_D0: |
3194 | pci_write_config_word(dev: bp->pdev, where: bp->pdev->pm_cap + PCI_PM_CTRL, |
3195 | val: ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | |
3196 | PCI_PM_CTRL_PME_STATUS)); |
3197 | |
3198 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) |
3199 | /* delay required during transition out of D3hot */ |
3200 | msleep(msecs: 20); |
3201 | break; |
3202 | |
3203 | case PCI_D3hot: |
		/* If there are other clients above, don't shut down the power */
3206 | if (atomic_read(v: &bp->pdev->enable_cnt) != 1) |
3207 | return 0; |
3208 | /* Don't shut down the power for emulation and FPGA */ |
3209 | if (CHIP_REV_IS_SLOW(bp)) |
3210 | return 0; |
3211 | |
3212 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
3213 | pmcsr |= 3; |
3214 | |
3215 | if (bp->wol) |
3216 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; |
3217 | |
3218 | pci_write_config_word(dev: bp->pdev, where: bp->pdev->pm_cap + PCI_PM_CTRL, |
3219 | val: pmcsr); |
3220 | |
3221 | /* No more memory access after this point until |
3222 | * device is brought back to D0. |
3223 | */ |
3224 | break; |
3225 | |
3226 | default: |
3227 | dev_err(&bp->pdev->dev, "Can't support state = %d\n" , state); |
3228 | return -EINVAL; |
3229 | } |
3230 | return 0; |
3231 | } |
3232 | |
3233 | /* |
3234 | * net_device service functions |
3235 | */ |
3236 | static int bnx2x_poll(struct napi_struct *napi, int budget) |
3237 | { |
3238 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, |
3239 | napi); |
3240 | struct bnx2x *bp = fp->bp; |
3241 | int rx_work_done; |
3242 | u8 cos; |
3243 | |
3244 | #ifdef BNX2X_STOP_ON_ERROR |
3245 | if (unlikely(bp->panic)) { |
3246 | napi_complete(napi); |
3247 | return 0; |
3248 | } |
3249 | #endif |
3250 | for_each_cos_in_tx_queue(fp, cos) |
3251 | if (bnx2x_tx_queue_has_work(txdata: fp->txdata_ptr[cos])) |
3252 | bnx2x_tx_int(bp, txdata: fp->txdata_ptr[cos]); |
3253 | |
3254 | rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; |
3255 | |
3256 | if (rx_work_done < budget) { |
3257 | /* No need to update SB for FCoE L2 ring as long as |
3258 | * it's connected to the default SB and the SB |
3259 | * has been updated when NAPI was scheduled. |
3260 | */ |
3261 | if (IS_FCOE_FP(fp)) { |
3262 | napi_complete_done(n: napi, work_done: rx_work_done); |
3263 | } else { |
3264 | bnx2x_update_fpsb_idx(fp); |
3265 | /* bnx2x_has_rx_work() reads the status block, |
3266 | * thus we need to ensure that status block indices |
3267 | * have been actually read (bnx2x_update_fpsb_idx) |
3268 | * prior to this check (bnx2x_has_rx_work) so that |
3269 | * we won't write the "newer" value of the status block |
3270 | * to IGU (if there was a DMA right after |
3271 | * bnx2x_has_rx_work and if there is no rmb, the memory |
3272 | * reading (bnx2x_update_fpsb_idx) may be postponed |
3273 | * to right before bnx2x_ack_sb). In this case there |
3274 | * will never be another interrupt until there is |
3275 | * another update of the status block, while there |
3276 | * is still unhandled work. |
3277 | */ |
3278 | rmb(); |
3279 | |
3280 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
3281 | if (napi_complete_done(n: napi, work_done: rx_work_done)) { |
3282 | /* Re-enable interrupts */ |
3283 | DP(NETIF_MSG_RX_STATUS, |
3284 | "Update index to %d\n" , fp->fp_hc_idx); |
3285 | bnx2x_ack_sb(bp, igu_sb_id: fp->igu_sb_id, storm: USTORM_ID, |
3286 | le16_to_cpu(fp->fp_hc_idx), |
3287 | op: IGU_INT_ENABLE, update: 1); |
3288 | } |
3289 | } else { |
3290 | rx_work_done = budget; |
3291 | } |
3292 | } |
3293 | } |
3294 | |
3295 | return rx_work_done; |
3296 | } |
3297 | |
/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
3301 | */ |
3302 | static u16 bnx2x_tx_split(struct bnx2x *bp, |
3303 | struct bnx2x_fp_txdata *txdata, |
3304 | struct sw_tx_bd *tx_buf, |
3305 | struct eth_tx_start_bd **tx_bd, u16 hlen, |
3306 | u16 bd_prod) |
3307 | { |
3308 | struct eth_tx_start_bd *h_tx_bd = *tx_bd; |
3309 | struct eth_tx_bd *d_tx_bd; |
3310 | dma_addr_t mapping; |
3311 | int old_len = le16_to_cpu(h_tx_bd->nbytes); |
3312 | |
3313 | /* first fix first BD */ |
3314 | h_tx_bd->nbytes = cpu_to_le16(hlen); |
3315 | |
3316 | DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n" , |
3317 | h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); |
3318 | |
3319 | /* now get a new data BD |
3320 | * (after the pbd) and fill it */ |
3321 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
3322 | d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; |
3323 | |
3324 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), |
3325 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; |
3326 | |
3327 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
3328 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
3329 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); |
3330 | |
3331 | /* this marks the BD as one that has no individual mapping */ |
3332 | tx_buf->flags |= BNX2X_TSO_SPLIT_BD; |
3333 | |
3334 | DP(NETIF_MSG_TX_QUEUED, |
3335 | "TSO split data size is %d (%x:%x)\n" , |
3336 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); |
3337 | |
3338 | /* update tx_bd */ |
3339 | *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; |
3340 | |
3341 | return bd_prod; |
3342 | } |
3343 | |
3344 | #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) |
3345 | #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) |
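/* bnx2x_csum_fix - adjust a partial checksum so that it covers the
* transport header onwards: for fix > 0 the sum over the 'fix' bytes
* just before the transport header is folded out, for fix < 0 the first
* -fix bytes at the transport header are folded back in; the folded
* result is byte-swapped for the parse BD.  This is the "HW bug"
* checksum fixup referenced at the call site.
*/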
3346 | static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) |
3347 | { |
3348 | __sum16 tsum = (__force __sum16) csum; |
3349 | |
3350 | if (fix > 0) |
3351 | tsum = ~csum_fold(sum: csum_sub(csum: (__force __wsum) csum, |
3352 | addend: csum_partial(buff: t_header - fix, len: fix, sum: 0))); |
3353 | |
3354 | else if (fix < 0) |
3355 | tsum = ~csum_fold(sum: csum_add(csum: (__force __wsum) csum, |
3356 | addend: csum_partial(buff: t_header, len: -fix, sum: 0))); |
3357 | |
3358 | return bswab16(tsum); |
3359 | } |
3360 | |
3361 | static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) |
3362 | { |
3363 | u32 rc; |
3364 | __u8 prot = 0; |
3365 | __be16 protocol; |
3366 | |
3367 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
3368 | return XMIT_PLAIN; |
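/* From here on build up the XMIT_* bitmask: outer L3 type and L4
* protocol first, then the inner (encapsulated) headers on non-E1x
* chips, and finally the GSO flavour.  bnx2x_start_xmit() uses this
* mask to decide how to fill the start and parse BDs.
*/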
3369 | |
3370 | protocol = vlan_get_protocol(skb); |
3371 | if (protocol == htons(ETH_P_IPV6)) { |
3372 | rc = XMIT_CSUM_V6; |
3373 | prot = ipv6_hdr(skb)->nexthdr; |
3374 | } else { |
3375 | rc = XMIT_CSUM_V4; |
3376 | prot = ip_hdr(skb)->protocol; |
3377 | } |
3378 | |
3379 | if (!CHIP_IS_E1x(bp) && skb->encapsulation) { |
3380 | if (inner_ip_hdr(skb)->version == 6) { |
3381 | rc |= XMIT_CSUM_ENC_V6; |
3382 | if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
3383 | rc |= XMIT_CSUM_TCP; |
3384 | } else { |
3385 | rc |= XMIT_CSUM_ENC_V4; |
3386 | if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) |
3387 | rc |= XMIT_CSUM_TCP; |
3388 | } |
3389 | } |
3390 | if (prot == IPPROTO_TCP) |
3391 | rc |= XMIT_CSUM_TCP; |
3392 | |
3393 | if (skb_is_gso(skb)) { |
3394 | if (skb_is_gso_v6(skb)) { |
3395 | rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); |
3396 | if (rc & XMIT_CSUM_ENC) |
3397 | rc |= XMIT_GSO_ENC_V6; |
3398 | } else { |
3399 | rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); |
3400 | if (rc & XMIT_CSUM_ENC) |
3401 | rc |= XMIT_GSO_ENC_V4; |
3402 | } |
3403 | } |
3404 | |
3405 | return rc; |
3406 | } |
3407 | |
3408 | /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */ |
3409 | #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4 |
3410 | |
3411 | /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ |
3412 | #define BNX2X_NUM_TSO_WIN_SUB_BDS 3 |
3413 | |
3414 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) |
3415 | /* check if packet requires linearization (packet is too fragmented) |
3416 | no need to check fragmentation if page size > 8K (there will be no |
3417 | violation to FW restrictions) */ |
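/* The check below slides a window of (MAX_FETCH_BD - num_tso_win_sub)
* consecutive BDs over the skb and requests linearization if any such
* window carries less than one MSS of payload; otherwise a single
* segment could span more BDs than the FW is able to fetch at once.
*/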
3418 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, |
3419 | u32 xmit_type) |
3420 | { |
3421 | int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS; |
3422 | int to_copy = 0, hlen = 0; |
3423 | |
3424 | if (xmit_type & XMIT_GSO_ENC) |
3425 | num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS; |
3426 | |
3427 | if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { |
3428 | if (xmit_type & XMIT_GSO) { |
3429 | unsigned short lso_mss = skb_shinfo(skb)->gso_size; |
3430 | int wnd_size = MAX_FETCH_BD - num_tso_win_sub; |
3431 | /* Number of windows to check */ |
3432 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; |
3433 | int wnd_idx = 0; |
3434 | int frag_idx = 0; |
3435 | u32 wnd_sum = 0; |
3436 | |
3437 | /* Headers length */ |
3438 | if (xmit_type & XMIT_GSO_ENC) |
3439 | hlen = skb_inner_tcp_all_headers(skb); |
3440 | else |
3441 | hlen = skb_tcp_all_headers(skb); |
3442 | |
3443 | /* Amount of data (w/o headers) on linear part of SKB */ |
3444 | first_bd_sz = skb_headlen(skb) - hlen; |
3445 | |
3446 | wnd_sum = first_bd_sz; |
3447 | |
3448 | /* Calculate the first sum - it's special */ |
3449 | for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) |
3450 | wnd_sum += |
3451 | skb_frag_size(frag: &skb_shinfo(skb)->frags[frag_idx]); |
3452 | |
3453 | /* If there was data on linear skb data - check it */ |
3454 | if (first_bd_sz > 0) { |
3455 | if (unlikely(wnd_sum < lso_mss)) { |
3456 | to_copy = 1; |
3457 | goto exit_lbl; |
3458 | } |
3459 | |
3460 | wnd_sum -= first_bd_sz; |
3461 | } |
3462 | |
3463 | /* Others are easier: run through the frag list and |
3464 | check all windows */ |
3465 | for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { |
3466 | wnd_sum += |
3467 | skb_frag_size(frag: &skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); |
3468 | |
3469 | if (unlikely(wnd_sum < lso_mss)) { |
3470 | to_copy = 1; |
3471 | break; |
3472 | } |
3473 | wnd_sum -= |
3474 | skb_frag_size(frag: &skb_shinfo(skb)->frags[wnd_idx]); |
3475 | } |
3476 | } else { |
3477 | /* in non-LSO too fragmented packet should always |
3478 | be linearized */ |
3479 | to_copy = 1; |
3480 | } |
3481 | } |
3482 | |
3483 | exit_lbl: |
3484 | if (unlikely(to_copy)) |
3485 | DP(NETIF_MSG_TX_QUEUED, |
3486 | "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n" , |
3487 | (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO" , |
3488 | skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); |
3489 | |
3490 | return to_copy; |
3491 | } |
3492 | #endif |
3493 | |
3494 | /** |
3495 | * bnx2x_set_pbd_gso - update PBD in GSO case. |
3496 | * |
3497 | * @skb: packet skb |
3498 | * @pbd: parse BD |
3499 | * @xmit_type: xmit flags |
3500 | */ |
3501 | static void bnx2x_set_pbd_gso(struct sk_buff *skb, |
3502 | struct eth_tx_parse_bd_e1x *pbd, |
3503 | u32 xmit_type) |
3504 | { |
3505 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
3506 | pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); |
3507 | pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); |
3508 | |
3509 | if (xmit_type & XMIT_GSO_V4) { |
3510 | pbd->ip_id = bswab16(ip_hdr(skb)->id); |
3511 | pbd->tcp_pseudo_csum = |
3512 | bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
3513 | ip_hdr(skb)->daddr, |
3514 | 0, IPPROTO_TCP, 0)); |
3515 | } else { |
3516 | pbd->tcp_pseudo_csum = |
3517 | bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3518 | &ipv6_hdr(skb)->daddr, |
3519 | 0, IPPROTO_TCP, 0)); |
3520 | } |
3521 | |
3522 | pbd->global_data |= |
3523 | cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); |
3524 | } |
3525 | |
3526 | /** |
3527 | * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length |
3528 | * |
3529 | * @bp: driver handle |
3530 | * @skb: packet skb |
3531 | * @parsing_data: data to be updated |
3532 | * @xmit_type: xmit flags |
3533 | * |
3534 | * 57712/578xx related, when skb has encapsulation |
3535 | */ |
3536 | static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, |
3537 | u32 *parsing_data, u32 xmit_type) |
3538 | { |
3539 | *parsing_data |= |
3540 | ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << |
3541 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & |
3542 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; |
3543 | |
3544 | if (xmit_type & XMIT_CSUM_TCP) { |
3545 | *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << |
3546 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & |
3547 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; |
3548 | |
3549 | return skb_inner_tcp_all_headers(skb); |
3550 | } |
3551 | |
3552 | /* We support checksum offload for TCP and UDP only. |
3553 | * No need to pass the UDP header length - it's a constant. |
3554 | */ |
3555 | return skb_inner_transport_offset(skb) + sizeof(struct udphdr); |
3556 | } |
3557 | |
3558 | /** |
3559 | * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length |
3560 | * |
3561 | * @bp: driver handle |
3562 | * @skb: packet skb |
3563 | * @parsing_data: data to be updated |
3564 | * @xmit_type: xmit flags |
3565 | * |
3566 | * 57712/578xx related |
3567 | */ |
3568 | static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, |
3569 | u32 *parsing_data, u32 xmit_type) |
3570 | { |
3571 | *parsing_data |= |
3572 | ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << |
3573 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & |
3574 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; |
3575 | |
3576 | if (xmit_type & XMIT_CSUM_TCP) { |
3577 | *parsing_data |= ((tcp_hdrlen(skb) / 4) << |
3578 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & |
3579 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; |
3580 | |
3581 | return skb_tcp_all_headers(skb); |
3582 | } |
3583 | /* We support checksum offload for TCP and UDP only. |
3584 | * No need to pass the UDP header length - it's a constant. |
3585 | */ |
3586 | return skb_transport_offset(skb) + sizeof(struct udphdr); |
3587 | } |
3588 | |
3589 | /* set FW indication according to inner or outer protocols if tunneled */ |
3590 | static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, |
3591 | struct eth_tx_start_bd *tx_start_bd, |
3592 | u32 xmit_type) |
3593 | { |
3594 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; |
3595 | |
3596 | if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) |
3597 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; |
3598 | |
3599 | if (!(xmit_type & XMIT_CSUM_TCP)) |
3600 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; |
3601 | } |
3602 | |
3603 | /** |
3604 | * bnx2x_set_pbd_csum - update PBD with checksum and return header length |
3605 | * |
3606 | * @bp: driver handle |
3607 | * @skb: packet skb |
3608 | * @pbd: parse BD to be updated |
3609 | * @xmit_type: xmit flags |
3610 | */ |
3611 | static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, |
3612 | struct eth_tx_parse_bd_e1x *pbd, |
3613 | u32 xmit_type) |
3614 | { |
3615 | u8 hlen = (skb_network_header(skb) - skb->data) >> 1; |
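/* Header lengths below are accumulated in 16-bit words (hence the >> 1
* and the /2 arithmetic), since the E1x parse BD expects word units;
* the value is converted back to bytes (hlen * 2) only for the return.
*/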
3616 | |
3617 | /* for now NS flag is not used in Linux */ |
3618 | pbd->global_data = |
3619 | cpu_to_le16(hlen | |
3620 | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << |
3621 | ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); |
3622 | |
3623 | pbd->ip_hlen_w = (skb_transport_header(skb) - |
3624 | skb_network_header(skb)) >> 1; |
3625 | |
3626 | hlen += pbd->ip_hlen_w; |
3627 | |
3628 | /* We support checksum offload for TCP and UDP only */ |
3629 | if (xmit_type & XMIT_CSUM_TCP) |
3630 | hlen += tcp_hdrlen(skb) / 2; |
3631 | else |
3632 | hlen += sizeof(struct udphdr) / 2; |
3633 | |
3634 | pbd->total_hlen_w = cpu_to_le16(hlen); |
3635 | hlen = hlen*2; |
3636 | |
3637 | if (xmit_type & XMIT_CSUM_TCP) { |
3638 | pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); |
3639 | |
3640 | } else { |
3641 | s8 fix = SKB_CS_OFF(skb); /* signed! */ |
3642 | |
3643 | DP(NETIF_MSG_TX_QUEUED, |
3644 | "hlen %d fix %d csum before fix %x\n" , |
3645 | le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); |
3646 | |
3647 | /* HW bug: fixup the CSUM */ |
3648 | pbd->tcp_pseudo_csum = |
3649 | bnx2x_csum_fix(t_header: skb_transport_header(skb), |
3650 | SKB_CS(skb), fix); |
3651 | |
3652 | DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n" , |
3653 | pbd->tcp_pseudo_csum); |
3654 | } |
3655 | |
3656 | return hlen; |
3657 | } |
3658 | |
3659 | static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, |
3660 | struct eth_tx_parse_bd_e2 *pbd_e2, |
3661 | struct eth_tx_parse_2nd_bd *pbd2, |
3662 | u16 *global_data, |
3663 | u32 xmit_type) |
3664 | { |
3665 | u16 hlen_w = 0; |
3666 | u8 outerip_off, outerip_len = 0; |
3667 | |
3668 | /* from outer IP to transport */ |
3669 | hlen_w = (skb_inner_transport_header(skb) - |
3670 | skb_network_header(skb)) >> 1; |
3671 | |
3672 | /* transport len */ |
3673 | hlen_w += inner_tcp_hdrlen(skb) >> 1; |
3674 | |
3675 | pbd2->fw_ip_hdr_to_payload_w = hlen_w; |
3676 | |
3677 | /* outer IP header info */ |
3678 | if (xmit_type & XMIT_CSUM_V4) { |
3679 | struct iphdr *iph = ip_hdr(skb); |
3680 | u32 csum = (__force u32)(~iph->check) - |
3681 | (__force u32)iph->tot_len - |
3682 | (__force u32)iph->frag_off; |
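/* csum now holds the outer IPv4 header checksum with tot_len and
* frag_off backed out (hence the _wo_len_flags_frag field name); the
* per-segment values of those fields are left for the FW to account
* for.
*/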
3683 | |
3684 | outerip_len = iph->ihl << 1; |
3685 | |
3686 | pbd2->fw_ip_csum_wo_len_flags_frag = |
3687 | bswab16(csum_fold((__force __wsum)csum)); |
3688 | } else { |
3689 | pbd2->fw_ip_hdr_to_payload_w = |
3690 | hlen_w - ((sizeof(struct ipv6hdr)) >> 1); |
3691 | pbd_e2->data.tunnel_data.flags |= |
3692 | ETH_TUNNEL_DATA_IPV6_OUTER; |
3693 | } |
3694 | |
3695 | pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); |
3696 | |
3697 | pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); |
3698 | |
3699 | /* inner IP header info */ |
3700 | if (xmit_type & XMIT_CSUM_ENC_V4) { |
3701 | pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); |
3702 | |
3703 | pbd_e2->data.tunnel_data.pseudo_csum = |
3704 | bswab16(~csum_tcpudp_magic( |
3705 | inner_ip_hdr(skb)->saddr, |
3706 | inner_ip_hdr(skb)->daddr, |
3707 | 0, IPPROTO_TCP, 0)); |
3708 | } else { |
3709 | pbd_e2->data.tunnel_data.pseudo_csum = |
3710 | bswab16(~csum_ipv6_magic( |
3711 | &inner_ipv6_hdr(skb)->saddr, |
3712 | &inner_ipv6_hdr(skb)->daddr, |
3713 | 0, IPPROTO_TCP, 0)); |
3714 | } |
3715 | |
3716 | outerip_off = (skb_network_header(skb) - skb->data) >> 1; |
3717 | |
3718 | *global_data |= |
3719 | outerip_off | |
3720 | (outerip_len << |
3721 | ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | |
3722 | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << |
3723 | ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); |
3724 | |
3725 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
3726 | SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1); |
3727 | pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; |
3728 | } |
3729 | } |
3730 | |
3731 | static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, |
3732 | u32 xmit_type) |
3733 | { |
3734 | struct ipv6hdr *ipv6; |
3735 | |
3736 | if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) |
3737 | return; |
3738 | |
3739 | if (xmit_type & XMIT_GSO_ENC_V6) |
3740 | ipv6 = inner_ipv6_hdr(skb); |
3741 | else /* XMIT_GSO_V6 */ |
3742 | ipv6 = ipv6_hdr(skb); |
3743 | |
3744 | if (ipv6->nexthdr == NEXTHDR_IPV6) |
3745 | *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; |
3746 | } |
3747 | |
3748 | /* called with netif_tx_lock |
3749 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call |
3750 | * netif_wake_queue() |
3751 | */ |
3752 | netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) |
3753 | { |
3754 | struct bnx2x *bp = netdev_priv(dev); |
3755 | |
3756 | struct netdev_queue *txq; |
3757 | struct bnx2x_fp_txdata *txdata; |
3758 | struct sw_tx_bd *tx_buf; |
3759 | struct eth_tx_start_bd *tx_start_bd, *first_bd; |
3760 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; |
3761 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; |
3762 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; |
3763 | struct eth_tx_parse_2nd_bd *pbd2 = NULL; |
3764 | u32 pbd_e2_parsing_data = 0; |
3765 | u16 pkt_prod, bd_prod; |
3766 | int nbd, txq_index; |
3767 | dma_addr_t mapping; |
3768 | u32 xmit_type = bnx2x_xmit_type(bp, skb); |
3769 | int i; |
3770 | u8 hlen = 0; |
3771 | __le16 pkt_size = 0; |
3772 | struct ethhdr *eth; |
3773 | u8 mac_type = UNICAST_ADDRESS; |
3774 | |
3775 | #ifdef BNX2X_STOP_ON_ERROR |
3776 | if (unlikely(bp->panic)) |
3777 | return NETDEV_TX_BUSY; |
3778 | #endif |
3779 | |
3780 | txq_index = skb_get_queue_mapping(skb); |
3781 | txq = netdev_get_tx_queue(dev, index: txq_index); |
3782 | |
3783 | BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); |
3784 | |
3785 | txdata = &bp->bnx2x_txq[txq_index]; |
3786 | |
3787 | /* enable this debug print to view the transmission queue being used |
3788 | DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", |
3789 | txq_index, fp_index, txdata_index); */ |
3790 | |
3791 | /* enable this debug print to view the transmission details |
3792 | DP(NETIF_MSG_TX_QUEUED, |
3793 | "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", |
3794 | txdata->cid, fp_index, txdata_index, txdata, fp); */ |
3795 | |
3796 | if (unlikely(bnx2x_tx_avail(bp, txdata) < |
3797 | skb_shinfo(skb)->nr_frags + |
3798 | BDS_PER_TX_PKT + |
3799 | NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { |
3800 | /* Handle special storage cases separately */ |
3801 | if (txdata->tx_ring_size == 0) { |
3802 | struct bnx2x_eth_q_stats *q_stats = |
3803 | bnx2x_fp_qstats(bp, txdata->parent_fp); |
3804 | q_stats->driver_filtered_tx_pkt++; |
3805 | dev_kfree_skb(skb); |
3806 | return NETDEV_TX_OK; |
3807 | } |
3808 | bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; |
3809 | netif_tx_stop_queue(dev_queue: txq); |
3810 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n" ); |
3811 | |
3812 | return NETDEV_TX_BUSY; |
3813 | } |
3814 | |
3815 | DP(NETIF_MSG_TX_QUEUED, |
3816 | "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n" , |
3817 | txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, |
3818 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, |
3819 | skb->len); |
3820 | |
3821 | eth = (struct ethhdr *)skb->data; |
3822 | |
3823 | /* set flag according to packet type (UNICAST_ADDRESS is default)*/ |
3824 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { |
3825 | if (is_broadcast_ether_addr(addr: eth->h_dest)) |
3826 | mac_type = BROADCAST_ADDRESS; |
3827 | else |
3828 | mac_type = MULTICAST_ADDRESS; |
3829 | } |
3830 | |
3831 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) |
3832 | /* First, check if we need to linearize the skb (due to FW |
3833 | restrictions). No need to check fragmentation if page size > 8K |
3834 | (there will be no violation to FW restrictions) */ |
3835 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { |
3836 | /* Statistics of linearization */ |
3837 | bp->lin_cnt++; |
3838 | if (skb_linearize(skb) != 0) { |
3839 | DP(NETIF_MSG_TX_QUEUED, |
3840 | "SKB linearization failed - silently dropping this SKB\n" ); |
3841 | dev_kfree_skb_any(skb); |
3842 | return NETDEV_TX_OK; |
3843 | } |
3844 | } |
3845 | #endif |
3846 | /* Map skb linear data for DMA */ |
3847 | mapping = dma_map_single(&bp->pdev->dev, skb->data, |
3848 | skb_headlen(skb), DMA_TO_DEVICE); |
3849 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
3850 | DP(NETIF_MSG_TX_QUEUED, |
3851 | "SKB mapping failed - silently dropping this SKB\n" ); |
3852 | dev_kfree_skb_any(skb); |
3853 | return NETDEV_TX_OK; |
3854 | } |
3855 | /* |
3856 | Please read carefully. First we use one BD which we mark as start, |
3857 | then we have a parsing info BD (used for TSO or xsum), |
3858 | and only then we have the rest of the TSO BDs. |
3859 | (don't forget to mark the last one as last, |
3860 | and to unmap only AFTER you write to the BD ...) |
3861 | And above all, all PBD sizes are in words - NOT DWORDS! |
3862 | */ |
3863 | |
3864 | /* get current pkt produced now - advance it just before sending packet |
3865 | * since mapping of pages may fail and cause packet to be dropped |
3866 | */ |
3867 | pkt_prod = txdata->tx_pkt_prod; |
3868 | bd_prod = TX_BD(txdata->tx_bd_prod); |
3869 | |
3870 | /* get a tx_buf and first BD |
3871 | * tx_start_bd may be changed during SPLIT, |
3872 | * but first_bd will always stay first |
3873 | */ |
3874 | tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; |
3875 | tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; |
3876 | first_bd = tx_start_bd; |
3877 | |
3878 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
3879 | |
3880 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { |
3881 | if (!(bp->flags & TX_TIMESTAMPING_EN)) { |
3882 | bp->eth_stats.ptp_skip_tx_ts++; |
3883 | BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n" ); |
3884 | } else if (bp->ptp_tx_skb) { |
3885 | bp->eth_stats.ptp_skip_tx_ts++; |
3886 | netdev_err_once(bp->dev, |
3887 | "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n" ); |
3888 | } else { |
3889 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
3890 | /* schedule check for Tx timestamp */ |
3891 | bp->ptp_tx_skb = skb_get(skb); |
3892 | bp->ptp_tx_start = jiffies; |
3893 | schedule_work(work: &bp->ptp_task); |
3894 | } |
3895 | } |
3896 | |
3897 | /* header nbd: indirectly zero other flags! */ |
3898 | tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; |
3899 | |
3900 | /* remember the first BD of the packet */ |
3901 | tx_buf->first_bd = txdata->tx_bd_prod; |
3902 | tx_buf->skb = skb; |
3903 | tx_buf->flags = 0; |
3904 | |
3905 | DP(NETIF_MSG_TX_QUEUED, |
3906 | "sending pkt %u @%p next_idx %u bd %u @%p\n" , |
3907 | pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); |
3908 | |
3909 | if (skb_vlan_tag_present(skb)) { |
3910 | tx_start_bd->vlan_or_ethertype = |
3911 | cpu_to_le16(skb_vlan_tag_get(skb)); |
3912 | tx_start_bd->bd_flags.as_bitfield |= |
3913 | (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); |
3914 | } else { |
3915 | /* when transmitting in a vf, start bd must hold the ethertype |
3916 | * for fw to enforce it |
3917 | */ |
3918 | u16 vlan_tci = 0; |
3919 | #ifndef BNX2X_STOP_ON_ERROR |
3920 | if (IS_VF(bp)) { |
3921 | #endif |
3922 | /* Still need to consider inband vlan for enforced */ |
3923 | if (__vlan_get_tag(skb, vlan_tci: &vlan_tci)) { |
3924 | tx_start_bd->vlan_or_ethertype = |
3925 | cpu_to_le16(ntohs(eth->h_proto)); |
3926 | } else { |
3927 | tx_start_bd->bd_flags.as_bitfield |= |
3928 | (X_ETH_INBAND_VLAN << |
3929 | ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); |
3930 | tx_start_bd->vlan_or_ethertype = |
3931 | cpu_to_le16(vlan_tci); |
3932 | } |
3933 | #ifndef BNX2X_STOP_ON_ERROR |
3934 | } else { |
3935 | /* used by FW for packet accounting */ |
3936 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); |
3937 | } |
3938 | #endif |
3939 | } |
3940 | |
3941 | nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ |
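/* nbd so far covers the start BD and the parse BD; it is incremented
* below for a second parse BD (encapsulation), a TSO header/data split,
* every mapped fragment and, at the very end, a possible next-page BD.
*/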
3942 | |
3943 | /* turn on parsing and get a BD */ |
3944 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
3945 | |
3946 | if (xmit_type & XMIT_CSUM) |
3947 | bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); |
3948 | |
3949 | if (!CHIP_IS_E1x(bp)) { |
3950 | pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; |
3951 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); |
3952 | |
3953 | if (xmit_type & XMIT_CSUM_ENC) { |
3954 | u16 global_data = 0; |
3955 | |
3956 | /* Set PBD in enc checksum offload case */ |
3957 | hlen = bnx2x_set_pbd_csum_enc(bp, skb, |
3958 | parsing_data: &pbd_e2_parsing_data, |
3959 | xmit_type); |
3960 | |
3961 | /* turn on 2nd parsing and get a BD */ |
3962 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
3963 | |
3964 | pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; |
3965 | |
3966 | memset(pbd2, 0, sizeof(*pbd2)); |
3967 | |
3968 | pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = |
3969 | (skb_inner_network_header(skb) - |
3970 | skb->data) >> 1; |
3971 | |
3972 | if (xmit_type & XMIT_GSO_ENC) |
3973 | bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2, |
3974 | global_data: &global_data, |
3975 | xmit_type); |
3976 | |
3977 | pbd2->global_data = cpu_to_le16(global_data); |
3978 | |
3979 | /* add addition parse BD indication to start BD */ |
3980 | SET_FLAG(tx_start_bd->general_data, |
3981 | ETH_TX_START_BD_PARSE_NBDS, 1); |
3982 | /* set encapsulation flag in start BD */ |
3983 | SET_FLAG(tx_start_bd->general_data, |
3984 | ETH_TX_START_BD_TUNNEL_EXIST, 1); |
3985 | |
3986 | tx_buf->flags |= BNX2X_HAS_SECOND_PBD; |
3987 | |
3988 | nbd++; |
3989 | } else if (xmit_type & XMIT_CSUM) { |
3990 | /* Set PBD in checksum offload case w/o encapsulation */ |
3991 | hlen = bnx2x_set_pbd_csum_e2(bp, skb, |
3992 | parsing_data: &pbd_e2_parsing_data, |
3993 | xmit_type); |
3994 | } |
3995 | |
3996 | bnx2x_set_ipv6_ext_e2(skb, parsing_data: &pbd_e2_parsing_data, xmit_type); |
3997 | /* Add the macs to the parsing BD if this is a vf or if |
3998 | * Tx Switching is enabled. |
3999 | */ |
4000 | if (IS_VF(bp)) { |
4001 | /* override GRE parameters in BD */ |
4002 | bnx2x_set_fw_mac_addr(fw_hi: &pbd_e2->data.mac_addr.src_hi, |
4003 | fw_mid: &pbd_e2->data.mac_addr.src_mid, |
4004 | fw_lo: &pbd_e2->data.mac_addr.src_lo, |
4005 | mac: eth->h_source); |
4006 | |
4007 | bnx2x_set_fw_mac_addr(fw_hi: &pbd_e2->data.mac_addr.dst_hi, |
4008 | fw_mid: &pbd_e2->data.mac_addr.dst_mid, |
4009 | fw_lo: &pbd_e2->data.mac_addr.dst_lo, |
4010 | mac: eth->h_dest); |
4011 | } else { |
4012 | if (bp->flags & TX_SWITCHING) |
4013 | bnx2x_set_fw_mac_addr( |
4014 | fw_hi: &pbd_e2->data.mac_addr.dst_hi, |
4015 | fw_mid: &pbd_e2->data.mac_addr.dst_mid, |
4016 | fw_lo: &pbd_e2->data.mac_addr.dst_lo, |
4017 | mac: eth->h_dest); |
4018 | #ifdef BNX2X_STOP_ON_ERROR |
4019 | /* Enforce security is always set in Stop on Error - |
4020 | * source mac should be present in the parsing BD |
4021 | */ |
4022 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, |
4023 | &pbd_e2->data.mac_addr.src_mid, |
4024 | &pbd_e2->data.mac_addr.src_lo, |
4025 | eth->h_source); |
4026 | #endif |
4027 | } |
4028 | |
4029 | SET_FLAG(pbd_e2_parsing_data, |
4030 | ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); |
4031 | } else { |
4032 | u16 global_data = 0; |
4033 | pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; |
4034 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); |
4035 | /* Set PBD in checksum offload case */ |
4036 | if (xmit_type & XMIT_CSUM) |
4037 | hlen = bnx2x_set_pbd_csum(bp, skb, pbd: pbd_e1x, xmit_type); |
4038 | |
4039 | SET_FLAG(global_data, |
4040 | ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); |
4041 | pbd_e1x->global_data |= cpu_to_le16(global_data); |
4042 | } |
4043 | |
4044 | /* Setup the data pointer of the first BD of the packet */ |
4045 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
4046 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
4047 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
4048 | pkt_size = tx_start_bd->nbytes; |
4049 | |
4050 | DP(NETIF_MSG_TX_QUEUED, |
4051 | "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n" , |
4052 | tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, |
4053 | le16_to_cpu(tx_start_bd->nbytes), |
4054 | tx_start_bd->bd_flags.as_bitfield, |
4055 | le16_to_cpu(tx_start_bd->vlan_or_ethertype)); |
4056 | |
4057 | if (xmit_type & XMIT_GSO) { |
4058 | |
4059 | DP(NETIF_MSG_TX_QUEUED, |
4060 | "TSO packet len %d hlen %d total len %d tso size %d\n" , |
4061 | skb->len, hlen, skb_headlen(skb), |
4062 | skb_shinfo(skb)->gso_size); |
4063 | |
4064 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; |
4065 | |
4066 | if (unlikely(skb_headlen(skb) > hlen)) { |
4067 | nbd++; |
4068 | bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, |
4069 | tx_bd: &tx_start_bd, hlen, |
4070 | bd_prod); |
4071 | } |
4072 | if (!CHIP_IS_E1x(bp)) |
4073 | pbd_e2_parsing_data |= |
4074 | (skb_shinfo(skb)->gso_size << |
4075 | ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & |
4076 | ETH_TX_PARSE_BD_E2_LSO_MSS; |
4077 | else |
4078 | bnx2x_set_pbd_gso(skb, pbd: pbd_e1x, xmit_type); |
4079 | } |
4080 | |
4081 | /* Set the PBD's parsing_data field if not zero |
4082 | * (for the chips newer than 57711). |
4083 | */ |
4084 | if (pbd_e2_parsing_data) |
4085 | pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); |
4086 | |
4087 | tx_data_bd = (struct eth_tx_bd *)tx_start_bd; |
4088 | |
4089 | /* Handle fragmented skb */ |
4090 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
4091 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4092 | |
4093 | mapping = skb_frag_dma_map(dev: &bp->pdev->dev, frag, offset: 0, |
4094 | size: skb_frag_size(frag), dir: DMA_TO_DEVICE); |
4095 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
4096 | unsigned int pkts_compl = 0, bytes_compl = 0; |
4097 | |
4098 | DP(NETIF_MSG_TX_QUEUED, |
4099 | "Unable to map page - dropping packet...\n" ); |
4100 | |
4101 | /* we need unmap all buffers already mapped |
4102 | * for this SKB; |
4103 | * first_bd->nbd need to be properly updated |
4104 | * before call to bnx2x_free_tx_pkt |
4105 | */ |
4106 | first_bd->nbd = cpu_to_le16(nbd); |
4107 | bnx2x_free_tx_pkt(bp, txdata, |
4108 | TX_BD(txdata->tx_pkt_prod), |
4109 | pkts_compl: &pkts_compl, bytes_compl: &bytes_compl); |
4110 | return NETDEV_TX_OK; |
4111 | } |
4112 | |
4113 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
4114 | tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; |
4115 | if (total_pkt_bd == NULL) |
4116 | total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; |
4117 | |
4118 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
4119 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
4120 | tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); |
4121 | le16_add_cpu(var: &pkt_size, val: skb_frag_size(frag)); |
4122 | nbd++; |
4123 | |
4124 | DP(NETIF_MSG_TX_QUEUED, |
4125 | "frag %d bd @%p addr (%x:%x) nbytes %d\n" , |
4126 | i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, |
4127 | le16_to_cpu(tx_data_bd->nbytes)); |
4128 | } |
4129 | |
4130 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n" , tx_data_bd); |
4131 | |
4132 | /* update with actual num BDs */ |
4133 | first_bd->nbd = cpu_to_le16(nbd); |
4134 | |
4135 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
4136 | |
4137 | /* now send a tx doorbell; if the packet's BDs wrapped past the end |
4138 | * of a BD page, count the "next page" BD it stepped over as well |
4139 | */ |
4140 | if (TX_BD_POFF(bd_prod) < nbd) |
4141 | nbd++; |
4142 | |
4143 | /* total_pkt_bytes should be set on the first data BD if |
4144 | * it's not an LSO packet and there is more than one |
4145 | * data BD. In this case pkt_size is limited by an MTU value. |
4146 | * However we prefer to set it for an LSO packet (while we don't |
4147 | * have to) in order to save some CPU cycles in the non-LSO |
4148 | * case, where we care much more about them. |
4149 | */ |
4150 | if (total_pkt_bd != NULL) |
4151 | total_pkt_bd->total_pkt_bytes = pkt_size; |
4152 | |
4153 | if (pbd_e1x) |
4154 | DP(NETIF_MSG_TX_QUEUED, |
4155 | "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n" , |
4156 | pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, |
4157 | pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, |
4158 | pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, |
4159 | le16_to_cpu(pbd_e1x->total_hlen_w)); |
4160 | if (pbd_e2) |
4161 | DP(NETIF_MSG_TX_QUEUED, |
4162 | "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n" , |
4163 | pbd_e2, |
4164 | pbd_e2->data.mac_addr.dst_hi, |
4165 | pbd_e2->data.mac_addr.dst_mid, |
4166 | pbd_e2->data.mac_addr.dst_lo, |
4167 | pbd_e2->data.mac_addr.src_hi, |
4168 | pbd_e2->data.mac_addr.src_mid, |
4169 | pbd_e2->data.mac_addr.src_lo, |
4170 | pbd_e2->parsing_data); |
4171 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n" , nbd, bd_prod); |
4172 | |
4173 | netdev_tx_sent_queue(dev_queue: txq, bytes: skb->len); |
4174 | |
4175 | skb_tx_timestamp(skb); |
4176 | |
4177 | txdata->tx_pkt_prod++; |
4178 | /* |
4179 | * Make sure that the BD data is updated before updating the producer |
4180 | * since FW might read the BD right after the producer is updated. |
4181 | * This is only applicable for weak-ordered memory model archs such |
4182 | * as IA-64. The following barrier is also mandatory since the FW |
4183 | * assumes packets must have BDs. |
4184 | */ |
4185 | wmb(); |
4186 | |
4187 | txdata->tx_db.data.prod += nbd; |
4188 | /* make sure descriptor update is observed by HW */ |
4189 | wmb(); |
4190 | |
4191 | DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); |
4192 | |
4193 | txdata->tx_bd_prod += nbd; |
4194 | |
4195 | if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { |
4196 | netif_tx_stop_queue(dev_queue: txq); |
4197 | |
4198 | /* paired memory barrier is in bnx2x_tx_int(), we have to keep |
4199 | * ordering of set_bit() in netif_tx_stop_queue() and read of |
4200 | * fp->bd_tx_cons */ |
4201 | smp_mb(); |
4202 | |
4203 | bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; |
4204 | if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) |
4205 | netif_tx_wake_queue(dev_queue: txq); |
4206 | } |
4207 | txdata->tx_pkt++; |
4208 | |
4209 | return NETDEV_TX_OK; |
4210 | } |
4211 | |
4212 | void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) |
4213 | { |
4214 | int mfw_vn = BP_FW_MB_IDX(bp); |
4215 | u32 tmp; |
4216 | |
4217 | /* If the shmem shouldn't affect configuration, fall back to an identity map */ |
4218 | if (!IS_MF_BD(bp)) { |
4219 | int i; |
4220 | |
4221 | for (i = 0; i < BNX2X_MAX_PRIORITY; i++) |
4222 | c2s_map[i] = i; |
4223 | *c2s_default = 0; |
4224 | |
4225 | return; |
4226 | } |
4227 | |
4228 | tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); |
4229 | tmp = (__force u32)be32_to_cpu((__force __be32)tmp); |
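/* one mapped byte per priority: this 'lower' word covers priorities
* 0-3, the 'upper' word read below covers priorities 4-7
*/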
4230 | c2s_map[0] = tmp & 0xff; |
4231 | c2s_map[1] = (tmp >> 8) & 0xff; |
4232 | c2s_map[2] = (tmp >> 16) & 0xff; |
4233 | c2s_map[3] = (tmp >> 24) & 0xff; |
4234 | |
4235 | tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); |
4236 | tmp = (__force u32)be32_to_cpu((__force __be32)tmp); |
4237 | c2s_map[4] = tmp & 0xff; |
4238 | c2s_map[5] = (tmp >> 8) & 0xff; |
4239 | c2s_map[6] = (tmp >> 16) & 0xff; |
4240 | c2s_map[7] = (tmp >> 24) & 0xff; |
4241 | |
4242 | tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); |
4243 | tmp = (__force u32)be32_to_cpu((__force __be32)tmp); |
4244 | *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff; |
4245 | } |
4246 | |
4247 | /** |
4248 | * bnx2x_setup_tc - routine to configure net_device for multi tc |
4249 | * |
4250 | * @dev: net device to configure |
4251 | * @num_tc: number of traffic classes to enable |
4252 | * |
4253 | * callback connected to the ndo_setup_tc function pointer |
4254 | */ |
4255 | int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) |
4256 | { |
4257 | struct bnx2x *bp = netdev_priv(dev); |
4258 | u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def; |
4259 | int cos, prio, count, offset; |
4260 | |
4261 | /* setup tc must be called under rtnl lock */ |
4262 | ASSERT_RTNL(); |
4263 | |
4264 | /* no traffic classes requested. Aborting */ |
4265 | if (!num_tc) { |
4266 | netdev_reset_tc(dev); |
4267 | return 0; |
4268 | } |
4269 | |
4270 | /* requested to support too many traffic classes */ |
4271 | if (num_tc > bp->max_cos) { |
4272 | BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n" , |
4273 | num_tc, bp->max_cos); |
4274 | return -EINVAL; |
4275 | } |
4276 | |
4277 | /* declare amount of supported traffic classes */ |
4278 | if (netdev_set_num_tc(dev, num_tc)) { |
4279 | BNX2X_ERR("failed to declare %d traffic classes\n" , num_tc); |
4280 | return -EINVAL; |
4281 | } |
4282 | |
4283 | bnx2x_get_c2s_mapping(bp, c2s_map, c2s_default: &c2s_def); |
4284 | |
4285 | /* configure priority to traffic class mapping */ |
4286 | for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { |
4287 | int outer_prio = c2s_map[prio]; |
4288 | |
4289 | netdev_set_prio_tc_map(dev, prio, tc: bp->prio_to_cos[outer_prio]); |
4290 | DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, |
4291 | "mapping priority %d to tc %d\n" , |
4292 | outer_prio, bp->prio_to_cos[outer_prio]); |
4293 | } |
4294 | |
4295 | /* Use this configuration to differentiate tc0 from other COSes |
4296 | This can be used for ETS or PFC, and saves the effort of setting |
4297 | up a multi-class queueing discipline or negotiating DCBX with a switch |
4298 | netdev_set_prio_tc_map(dev, 0, 0); |
4299 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); |
4300 | for (prio = 1; prio < 16; prio++) { |
4301 | netdev_set_prio_tc_map(dev, prio, 1); |
4302 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); |
4303 | } */ |
4304 | |
4305 | /* configure traffic class to transmission queue mapping */ |
4306 | for (cos = 0; cos < bp->max_cos; cos++) { |
4307 | count = BNX2X_NUM_ETH_QUEUES(bp); |
4308 | offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); |
4309 | netdev_set_tc_queue(dev, tc: cos, count, offset); |
4310 | DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, |
4311 | "mapping tc %d to offset %d count %d\n" , |
4312 | cos, offset, count); |
4313 | } |
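/* Illustrative example (hypothetical sizes): with 4 ETH (non-CNIC)
* queues and max_cos == 3 the loop above maps tc0 -> txq 0-3,
* tc1 -> txq 4-7 and tc2 -> txq 8-11.
*/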
4314 | |
4315 | return 0; |
4316 | } |
4317 | |
4318 | int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, |
4319 | void *type_data) |
4320 | { |
4321 | struct tc_mqprio_qopt *mqprio = type_data; |
4322 | |
4323 | if (type != TC_SETUP_QDISC_MQPRIO) |
4324 | return -EOPNOTSUPP; |
4325 | |
4326 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
4327 | |
4328 | return bnx2x_setup_tc(dev, num_tc: mqprio->num_tc); |
4329 | } |
4330 | |
4331 | /* called with rtnl_lock */ |
4332 | int bnx2x_change_mac_addr(struct net_device *dev, void *p) |
4333 | { |
4334 | struct sockaddr *addr = p; |
4335 | struct bnx2x *bp = netdev_priv(dev); |
4336 | int rc = 0; |
4337 | |
4338 | if (!is_valid_ether_addr(addr: addr->sa_data)) { |
4339 | BNX2X_ERR("Requested MAC address is not valid\n" ); |
4340 | return -EINVAL; |
4341 | } |
4342 | |
4343 | if (IS_MF_STORAGE_ONLY(bp)) { |
4344 | BNX2X_ERR("Can't change address on STORAGE ONLY function\n" ); |
4345 | return -EINVAL; |
4346 | } |
4347 | |
4348 | if (netif_running(dev)) { |
4349 | rc = bnx2x_set_eth_mac(bp, set: false); |
4350 | if (rc) |
4351 | return rc; |
4352 | } |
4353 | |
4354 | eth_hw_addr_set(dev, addr: addr->sa_data); |
4355 | |
4356 | if (netif_running(dev)) |
4357 | rc = bnx2x_set_eth_mac(bp, set: true); |
4358 | |
4359 | if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) |
4360 | SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); |
4361 | |
4362 | return rc; |
4363 | } |
4364 | |
4365 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) |
4366 | { |
4367 | union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); |
4368 | struct bnx2x_fastpath *fp = &bp->fp[fp_index]; |
4369 | u8 cos; |
4370 | |
4371 | /* Common */ |
4372 | |
4373 | if (IS_FCOE_IDX(fp_index)) { |
4374 | memset(sb, 0, sizeof(union host_hc_status_block)); |
4375 | fp->status_blk_mapping = 0; |
4376 | } else { |
4377 | /* status blocks */ |
4378 | if (!CHIP_IS_E1x(bp)) |
4379 | BNX2X_PCI_FREE(sb->e2_sb, |
4380 | bnx2x_fp(bp, fp_index, |
4381 | status_blk_mapping), |
4382 | sizeof(struct host_hc_status_block_e2)); |
4383 | else |
4384 | BNX2X_PCI_FREE(sb->e1x_sb, |
4385 | bnx2x_fp(bp, fp_index, |
4386 | status_blk_mapping), |
4387 | sizeof(struct host_hc_status_block_e1x)); |
4388 | } |
4389 | |
4390 | /* Rx */ |
4391 | if (!skip_rx_queue(bp, fp_index)) { |
4392 | bnx2x_free_rx_bds(fp); |
4393 | |
4394 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
4395 | BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); |
4396 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), |
4397 | bnx2x_fp(bp, fp_index, rx_desc_mapping), |
4398 | sizeof(struct eth_rx_bd) * NUM_RX_BD); |
4399 | |
4400 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), |
4401 | bnx2x_fp(bp, fp_index, rx_comp_mapping), |
4402 | sizeof(struct eth_fast_path_rx_cqe) * |
4403 | NUM_RCQ_BD); |
4404 | |
4405 | /* SGE ring */ |
4406 | BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); |
4407 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), |
4408 | bnx2x_fp(bp, fp_index, rx_sge_mapping), |
4409 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
4410 | } |
4411 | |
4412 | /* Tx */ |
4413 | if (!skip_tx_queue(bp, fp_index)) { |
4414 | /* fastpath tx rings: tx_buf tx_desc */ |
4415 | for_each_cos_in_tx_queue(fp, cos) { |
4416 | struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; |
4417 | |
4418 | DP(NETIF_MSG_IFDOWN, |
4419 | "freeing tx memory of fp %d cos %d cid %d\n" , |
4420 | fp_index, cos, txdata->cid); |
4421 | |
4422 | BNX2X_FREE(txdata->tx_buf_ring); |
4423 | BNX2X_PCI_FREE(txdata->tx_desc_ring, |
4424 | txdata->tx_desc_mapping, |
4425 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); |
4426 | } |
4427 | } |
4428 | /* end of fastpath */ |
4429 | } |
4430 | |
4431 | static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) |
4432 | { |
4433 | int i; |
4434 | for_each_cnic_queue(bp, i) |
4435 | bnx2x_free_fp_mem_at(bp, fp_index: i); |
4436 | } |
4437 | |
4438 | void bnx2x_free_fp_mem(struct bnx2x *bp) |
4439 | { |
4440 | int i; |
4441 | for_each_eth_queue(bp, i) |
4442 | bnx2x_free_fp_mem_at(bp, fp_index: i); |
4443 | } |
4444 | |
4445 | static void set_sb_shortcuts(struct bnx2x *bp, int index) |
4446 | { |
4447 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); |
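/* Cache direct pointers to this SB's index arrays so the fast path can
* read them without re-deriving the chip-specific SB layout each time.
*/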
4448 | if (!CHIP_IS_E1x(bp)) { |
4449 | bnx2x_fp(bp, index, sb_index_values) = |
4450 | (__le16 *)status_blk.e2_sb->sb.index_values; |
4451 | bnx2x_fp(bp, index, sb_running_index) = |
4452 | (__le16 *)status_blk.e2_sb->sb.running_index; |
4453 | } else { |
4454 | bnx2x_fp(bp, index, sb_index_values) = |
4455 | (__le16 *)status_blk.e1x_sb->sb.index_values; |
4456 | bnx2x_fp(bp, index, sb_running_index) = |
4457 | (__le16 *)status_blk.e1x_sb->sb.running_index; |
4458 | } |
4459 | } |
4460 | |
4461 | /* Returns the number of actually allocated BDs */ |
4462 | static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, |
4463 | int rx_ring_size) |
4464 | { |
4465 | struct bnx2x *bp = fp->bp; |
4466 | u16 ring_prod, cqe_ring_prod; |
4467 | int i, failure_cnt = 0; |
4468 | |
4469 | fp->rx_comp_cons = 0; |
4470 | cqe_ring_prod = ring_prod = 0; |
4471 | |
4472 | /* This routine is called only during init so |
4473 | * fp->eth_q_stats.rx_skb_alloc_failed = 0 |
4474 | */ |
4475 | for (i = 0; i < rx_ring_size; i++) { |
4476 | if (bnx2x_alloc_rx_data(bp, fp, index: ring_prod, GFP_KERNEL) < 0) { |
4477 | failure_cnt++; |
4478 | continue; |
4479 | } |
4480 | ring_prod = NEXT_RX_IDX(ring_prod); |
4481 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); |
4482 | WARN_ON(ring_prod <= (i - failure_cnt)); |
4483 | } |
4484 | |
4485 | if (failure_cnt) |
4486 | BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n" , |
4487 | i - failure_cnt, fp->index); |
4488 | |
4489 | fp->rx_bd_prod = ring_prod; |
4490 | /* Limit the CQE producer by the CQE ring size */ |
4491 | fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, |
4492 | cqe_ring_prod); |
4493 | |
4494 | bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; |
4495 | |
4496 | return i - failure_cnt; |
4497 | } |
4498 | |
4499 | static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) |
4500 | { |
4501 | int i; |
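/* The last CQE of each RCQ page is a "next page" element; chain every
* page to the following one (wrapping at the end) so the completion
* queue forms one contiguous ring across pages.
*/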
4502 | |
4503 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { |
4504 | struct eth_rx_cqe_next_page *nextpg; |
4505 | |
4506 | nextpg = (struct eth_rx_cqe_next_page *) |
4507 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; |
4508 | nextpg->addr_hi = |
4509 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + |
4510 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
4511 | nextpg->addr_lo = |
4512 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + |
4513 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
4514 | } |
4515 | } |
4516 | |
4517 | static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) |
4518 | { |
4519 | union host_hc_status_block *sb; |
4520 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
4521 | int ring_size = 0; |
4522 | u8 cos; |
4523 | int rx_ring_size = 0; |
4524 | |
4525 | if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { |
4526 | rx_ring_size = MIN_RX_SIZE_NONTPA; |
4527 | bp->rx_ring_size = rx_ring_size; |
4528 | } else if (!bp->rx_ring_size) { |
4529 | rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); |
4530 | |
4531 | if (CHIP_IS_E3(bp)) { |
4532 | u32 cfg = SHMEM_RD(bp, |
4533 | dev_info.port_hw_config[BP_PORT(bp)]. |
4534 | default_cfg); |
4535 | |
4536 | /* Decrease ring size for 1G functions */ |
4537 | if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == |
4538 | PORT_HW_CFG_NET_SERDES_IF_SGMII) |
4539 | rx_ring_size /= 10; |
4540 | } |
4541 | |
4542 | /* allocate at least number of buffers required by FW */ |
4543 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : |
4544 | MIN_RX_SIZE_TPA, rx_ring_size); |
4545 | |
4546 | bp->rx_ring_size = rx_ring_size; |
4547 | } else /* if rx_ring_size specified - use it */ |
4548 | rx_ring_size = bp->rx_ring_size; |
4549 | |
4550 | DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n" , rx_ring_size); |
4551 | |
4552 | /* Common */ |
4553 | sb = &bnx2x_fp(bp, index, status_blk); |
4554 | |
4555 | if (!IS_FCOE_IDX(index)) { |
4556 | /* status blocks */ |
4557 | if (!CHIP_IS_E1x(bp)) { |
4558 | sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), |
4559 | sizeof(struct host_hc_status_block_e2)); |
4560 | if (!sb->e2_sb) |
4561 | goto alloc_mem_err; |
4562 | } else { |
4563 | sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), |
4564 | sizeof(struct host_hc_status_block_e1x)); |
4565 | if (!sb->e1x_sb) |
4566 | goto alloc_mem_err; |
4567 | } |
4568 | } |
4569 | |
4570 | /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to |
4571 | * set shortcuts for it. |
4572 | */ |
4573 | if (!IS_FCOE_IDX(index)) |
4574 | set_sb_shortcuts(bp, index); |
4575 | |
4576 | /* Tx */ |
4577 | if (!skip_tx_queue(bp, index)) { |
4578 | /* fastpath tx rings: tx_buf tx_desc */ |
4579 | for_each_cos_in_tx_queue(fp, cos) { |
4580 | struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; |
4581 | |
4582 | DP(NETIF_MSG_IFUP, |
4583 | "allocating tx memory of fp %d cos %d\n" , |
4584 | index, cos); |
4585 | |
4586 | txdata->tx_buf_ring = kcalloc(NUM_TX_BD, |
4587 | size: sizeof(struct sw_tx_bd), |
4588 | GFP_KERNEL); |
4589 | if (!txdata->tx_buf_ring) |
4590 | goto alloc_mem_err; |
4591 | txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, |
4592 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); |
4593 | if (!txdata->tx_desc_ring) |
4594 | goto alloc_mem_err; |
4595 | } |
4596 | } |
4597 | |
4598 | /* Rx */ |
4599 | if (!skip_rx_queue(bp, index)) { |
4600 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
4601 | bnx2x_fp(bp, index, rx_buf_ring) = |
4602 | kcalloc(NUM_RX_BD, size: sizeof(struct sw_rx_bd), GFP_KERNEL); |
4603 | if (!bnx2x_fp(bp, index, rx_buf_ring)) |
4604 | goto alloc_mem_err; |
4605 | bnx2x_fp(bp, index, rx_desc_ring) = |
4606 | BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), |
4607 | sizeof(struct eth_rx_bd) * NUM_RX_BD); |
4608 | if (!bnx2x_fp(bp, index, rx_desc_ring)) |
4609 | goto alloc_mem_err; |
4610 | |
4611 | /* Seed all CQEs by 1s */ |
4612 | bnx2x_fp(bp, index, rx_comp_ring) = |
4613 | BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), |
4614 | sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); |
4615 | if (!bnx2x_fp(bp, index, rx_comp_ring)) |
4616 | goto alloc_mem_err; |
4617 | |
4618 | /* SGE ring */ |
4619 | bnx2x_fp(bp, index, rx_page_ring) = |
4620 | kcalloc(NUM_RX_SGE, size: sizeof(struct sw_rx_page), |
4621 | GFP_KERNEL); |
4622 | if (!bnx2x_fp(bp, index, rx_page_ring)) |
4623 | goto alloc_mem_err; |
4624 | bnx2x_fp(bp, index, rx_sge_ring) = |
4625 | BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), |
4626 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
4627 | if (!bnx2x_fp(bp, index, rx_sge_ring)) |
4628 | goto alloc_mem_err; |
4629 | /* RX BD ring */ |
4630 | bnx2x_set_next_page_rx_bd(fp); |
4631 | |
4632 | /* CQ ring */ |
4633 | bnx2x_set_next_page_rx_cq(fp); |
4634 | |
4635 | /* BDs */ |
4636 | ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); |
4637 | if (ring_size < rx_ring_size) |
4638 | goto alloc_mem_err; |
4639 | } |
4640 | |
4641 | return 0; |
4642 | |
4643 | /* handles low memory cases */ |
4644 | alloc_mem_err: |
4645 | BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n" , |
4646 | index, ring_size); |
4647 | /* FW will drop all packets if the queue is not big enough; |
4648 | * in these cases we disable the queue. |
4649 | * The minimum size differs for OOO, TPA and non-TPA queues. |
4650 | */ |
4651 | if (ring_size < (fp->mode == TPA_MODE_DISABLED ? |
4652 | MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { |
4653 | /* release memory allocated for this queue */ |
4654 | bnx2x_free_fp_mem_at(bp, fp_index: index); |
4655 | return -ENOMEM; |
4656 | } |
4657 | return 0; |
4658 | } |
4659 | |
4660 | static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) |
4661 | { |
4662 | if (!NO_FCOE(bp)) |
4663 | /* FCoE */ |
4664 | if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) |
4665 | /* we will fail load process instead of mark |
4666 | * NO_FCOE_FLAG |
4667 | */ |
4668 | return -ENOMEM; |
4669 | |
4670 | return 0; |
4671 | } |
4672 | |
4673 | static int bnx2x_alloc_fp_mem(struct bnx2x *bp) |
4674 | { |
4675 | int i; |
4676 | |
4677 | /* 1. Allocate FP for leading - fatal if error |
4678 | * 2. Allocate RSS - fix number of queues if error |
4679 | */ |
4680 | |
4681 | /* leading */ |
4682 | if (bnx2x_alloc_fp_mem_at(bp, index: 0)) |
4683 | return -ENOMEM; |
4684 | |
4685 | /* RSS */ |
4686 | for_each_nondefault_eth_queue(bp, i) |
4687 | if (bnx2x_alloc_fp_mem_at(bp, index: i)) |
4688 | break; |
4689 | |
4690 | /* handle memory failures */ |
4691 | if (i != BNX2X_NUM_ETH_QUEUES(bp)) { |
4692 | int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; |
4693 | |
4694 | WARN_ON(delta < 0); |
4695 | bnx2x_shrink_eth_fp(bp, delta); |
4696 | if (CNIC_SUPPORT(bp)) |
4697 | /* move non eth FPs next to last eth FP |
4698 | * must be done in that order |
4699 | * FCOE_IDX < FWD_IDX < OOO_IDX |
4700 | */ |
4701 | |
4702 | /* move FCoE fp even if NO_FCOE_FLAG is on */ |
4703 | bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); |
4704 | bp->num_ethernet_queues -= delta; |
4705 | bp->num_queues = bp->num_ethernet_queues + |
4706 | bp->num_cnic_queues; |
4707 | BNX2X_ERR("Adjusted num of queues from %d to %d\n" , |
4708 | bp->num_queues + delta, bp->num_queues); |
4709 | } |
4710 | |
4711 | return 0; |
4712 | } |
4713 | |
4714 | void bnx2x_free_mem_bp(struct bnx2x *bp) |
4715 | { |
4716 | int i; |
4717 | |
4718 | for (i = 0; i < bp->fp_array_size; i++) |
4719 | kfree(objp: bp->fp[i].tpa_info); |
4720 | kfree(objp: bp->fp); |
4721 | kfree(objp: bp->sp_objs); |
4722 | kfree(objp: bp->fp_stats); |
4723 | kfree(objp: bp->bnx2x_txq); |
4724 | kfree(objp: bp->msix_table); |
4725 | kfree(objp: bp->ilt); |
4726 | } |
4727 | |
4728 | int bnx2x_alloc_mem_bp(struct bnx2x *bp) |
4729 | { |
4730 | struct bnx2x_fastpath *fp; |
4731 | struct msix_entry *tbl; |
4732 | struct bnx2x_ilt *ilt; |
4733 | int msix_table_size = 0; |
4734 | int fp_array_size, txq_array_size; |
4735 | int i; |
4736 | |
4737 | /* |
4738 | * The biggest MSI-X table we might need is as a maximum number of fast |
4739 | * path IGU SBs plus default SB (for PF only). |
4740 | */ |
4741 | msix_table_size = bp->igu_sb_cnt; |
4742 | if (IS_PF(bp)) |
4743 | msix_table_size++; |
4744 | BNX2X_DEV_INFO("msix_table_size %d\n" , msix_table_size); |
4745 | |
4746 | /* fp array: RSS plus CNIC related L2 queues */ |
4747 | fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); |
4748 | bp->fp_array_size = fp_array_size; |
4749 | BNX2X_DEV_INFO("fp_array_size %d\n" , bp->fp_array_size); |
4750 | |
4751 | fp = kcalloc(n: bp->fp_array_size, size: sizeof(*fp), GFP_KERNEL); |
4752 | if (!fp) |
4753 | goto alloc_err; |
4754 | for (i = 0; i < bp->fp_array_size; i++) { |
4755 | fp[i].tpa_info = |
4756 | kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, |
4757 | size: sizeof(struct bnx2x_agg_info), GFP_KERNEL); |
4758 | if (!(fp[i].tpa_info)) |
4759 | goto alloc_err; |
4760 | } |
4761 | |
4762 | bp->fp = fp; |
4763 | |
4764 | /* allocate sp objs */ |
4765 | bp->sp_objs = kcalloc(n: bp->fp_array_size, size: sizeof(struct bnx2x_sp_objs), |
4766 | GFP_KERNEL); |
4767 | if (!bp->sp_objs) |
4768 | goto alloc_err; |
4769 | |
4770 | /* allocate fp_stats */ |
4771 | bp->fp_stats = kcalloc(n: bp->fp_array_size, size: sizeof(struct bnx2x_fp_stats), |
4772 | GFP_KERNEL); |
4773 | if (!bp->fp_stats) |
4774 | goto alloc_err; |
4775 | |
4776 | /* Allocate memory for the transmission queues array */ |
4777 | txq_array_size = |
4778 | BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); |
4779 | BNX2X_DEV_INFO("txq_array_size %d" , txq_array_size); |
4780 | |
4781 | bp->bnx2x_txq = kcalloc(n: txq_array_size, size: sizeof(struct bnx2x_fp_txdata), |
4782 | GFP_KERNEL); |
4783 | if (!bp->bnx2x_txq) |
4784 | goto alloc_err; |
4785 | |
4786 | /* msix table */ |
4787 | tbl = kcalloc(n: msix_table_size, size: sizeof(*tbl), GFP_KERNEL); |
4788 | if (!tbl) |
4789 | goto alloc_err; |
4790 | bp->msix_table = tbl; |
4791 | |
4792 | /* ilt */ |
4793 | ilt = kzalloc(size: sizeof(*ilt), GFP_KERNEL); |
4794 | if (!ilt) |
4795 | goto alloc_err; |
4796 | bp->ilt = ilt; |
4797 | |
4798 | return 0; |
4799 | alloc_err: |
4800 | bnx2x_free_mem_bp(bp); |
4801 | return -ENOMEM; |
4802 | } |
4803 | |
4804 | int bnx2x_reload_if_running(struct net_device *dev) |
4805 | { |
4806 | struct bnx2x *bp = netdev_priv(dev); |
4807 | |
4808 | if (unlikely(!netif_running(dev))) |
4809 | return 0; |
4810 | |
4811 | bnx2x_nic_unload(bp, UNLOAD_NORMAL, keep_link: true); |
4812 | return bnx2x_nic_load(bp, LOAD_NORMAL); |
4813 | } |
4814 | |
4815 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp) |
4816 | { |
4817 | u32 sel_phy_idx = 0; |
4818 | if (bp->link_params.num_phys <= 1) |
4819 | return INT_PHY; |
4820 | |
4821 | if (bp->link_vars.link_up) { |
4822 | sel_phy_idx = EXT_PHY1; |
4823 | /* In case link is SERDES, check if the EXT_PHY2 is the one */ |
4824 | if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && |
4825 | (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) |
4826 | sel_phy_idx = EXT_PHY2; |
4827 | } else { |
4828 | |
4829 | switch (bnx2x_phy_selection(params: &bp->link_params)) { |
4830 | case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: |
4831 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: |
4832 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: |
4833 | sel_phy_idx = EXT_PHY1; |
4834 | break; |
4835 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: |
4836 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: |
4837 | sel_phy_idx = EXT_PHY2; |
4838 | break; |
4839 | } |
4840 | } |
4841 | |
4842 | return sel_phy_idx; |
4843 | } |
4844 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp) |
4845 | { |
4846 | u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); |
4847 | /* |
4848 | * The selected activated PHY is always after swapping (in case PHY |
4849 | * swapping is enabled). So when swapping is enabled, we need to reverse |
4850 | * the configuration |
4851 | */ |
4852 | |
4853 | if (bp->link_params.multi_phy_config & |
4854 | PORT_HW_CFG_PHY_SWAPPED_ENABLED) { |
4855 | if (sel_phy_idx == EXT_PHY1) |
4856 | sel_phy_idx = EXT_PHY2; |
4857 | else if (sel_phy_idx == EXT_PHY2) |
4858 | sel_phy_idx = EXT_PHY1; |
4859 | } |
4860 | return LINK_CONFIG_IDX(sel_phy_idx); |
4861 | } |
4862 | |
4863 | #ifdef NETDEV_FCOE_WWNN |
4864 | int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) |
4865 | { |
4866 | struct bnx2x *bp = netdev_priv(dev); |
4867 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; |
4868 | |
4869 | switch (type) { |
4870 | case NETDEV_FCOE_WWNN: |
4871 | *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, |
4872 | cp->fcoe_wwn_node_name_lo); |
4873 | break; |
4874 | case NETDEV_FCOE_WWPN: |
4875 | *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, |
4876 | cp->fcoe_wwn_port_name_lo); |
4877 | break; |
4878 | default: |
4879 | BNX2X_ERR("Wrong WWN type requested - %d\n" , type); |
4880 | return -EINVAL; |
4881 | } |
4882 | |
4883 | return 0; |
4884 | } |
4885 | #endif |
4886 | |
4887 | /* called with rtnl_lock */ |
4888 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu) |
4889 | { |
4890 | struct bnx2x *bp = netdev_priv(dev); |
4891 | |
4892 | if (pci_num_vf(dev: bp->pdev)) { |
4893 | DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n" ); |
4894 | return -EPERM; |
4895 | } |
4896 | |
4897 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
4898 | BNX2X_ERR("Can't perform change MTU during parity recovery\n" ); |
4899 | return -EAGAIN; |
4900 | } |
4901 | |
4902 | /* This does not race with packet allocation |
4903 | * because the actual alloc size is |
4904 | * only updated as part of load |
4905 | */ |
4906 | dev->mtu = new_mtu; |
4907 | |
4908 | if (!bnx2x_mtu_allows_gro(mtu: new_mtu)) |
4909 | dev->features &= ~NETIF_F_GRO_HW; |
4910 | |
4911 | if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) |
4912 | SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); |
4913 | |
4914 | return bnx2x_reload_if_running(dev); |
4915 | } |
4916 | |
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
		features &= ~NETIF_F_GRO_HW;
	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	return features;
}

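/*
 * As implemented below, any feature change other than plain software GRO
 * triggers an internal reload; toggling NETIF_F_LOOPBACK additionally
 * switches link_params.loopback_mode between LOOPBACK_BMAC and
 * LOOPBACK_NONE. When a reload is performed, its status is returned, or 1
 * on success; during parity recovery the reload is left to
 * bnx2x_nic_load() at the end of recovery.
 */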
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non-SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* Don't care about GRO changes */
	changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

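/*
 * Transmit timeout handler: dump the driver state for debugging (without
 * calling bnx2x_panic(), which would prevent recovery, unless
 * BNX2X_STOP_ON_ERROR is set) and let the sp_rtnl task reset the device.
 */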
void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* We want the information of the dump logged,
	 * but calling bnx2x_panic() would kill all chances of recovery.
	 */
	if (!bp->panic)
#ifndef BNX2X_STOP_ON_ERROR
		bnx2x_panic_dump(bp, false);
#else
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

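/*
 * PM callbacks: suspend detaches the net device and unloads the NIC with
 * UNLOAD_CLOSE under rtnl_lock; resume re-attaches it and loads with
 * LOAD_OPEN, unless parity recovery is still in progress.
 */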
static int __maybe_unused bnx2x_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	rtnl_unlock();

	return 0;
}

static int __maybe_unused bnx2x_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);

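/*
 * Stamp the ustorm and xstorm aggregation parts of an Ethernet connection
 * context with the CDU reserved value derived from the HW CID, the CDU
 * region number and the connection type, so that (as the function name and
 * the CDU_RSRVD_VALUE_TYPE_A macro suggest) the context can be validated
 * when it is used.
 */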
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

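/*
 * Helpers for the host coalescing (HC) parameters of a status block index:
 * the timeout (in BNX2X_BTR ticks) and the HC_ENABLED flag both live in
 * the status block data kept in CSTORM internal memory and are written
 * directly with REG_WR8().
 */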
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

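/*
 * Convert a coalescing interval in microseconds into firmware ticks and
 * program it; e.g. usec = 100 is written as 100 / BNX2X_BTR ticks (25 if
 * BNX2X_BTR is 4). A zero interval, like an explicit @disable, turns host
 * coalescing off for this status block index.
 */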
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

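/*
 * Set a flag in sp_rtnl_state and kick the sp_rtnl task. The barriers
 * around set_bit() order the flag update with respect to surrounding
 * memory accesses before the work is scheduled; the sp_rtnl task is
 * expected to observe and clear the bit under rtnl_lock.
 */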
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
