/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/linkmode.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"

#define ICE_BAR0		0
#define ICE_REQ_DESC_MULTIPLE	32
#define ICE_MIN_NUM_DESC	64
#define ICE_MAX_NUM_DESC	8160
#define ICE_DFLT_MIN_RX_DESC	512
#define ICE_DFLT_NUM_TX_DESC	256
#define ICE_DFLT_NUM_RX_DESC	2048

#define ICE_DFLT_TRAFFIC_CLASS	BIT(0)
#define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
#define ICE_AQ_LEN		192
#define ICE_MBXSQ_LEN		64
#define ICE_SBQ_LEN		64
#define ICE_MIN_LAN_TXRX_MSIX	1
#define ICE_MIN_LAN_OICR_MSIX	1
#define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX		2
#define ICE_RDMA_NUM_AEQ_MSIX	4
#define ICE_MIN_RDMA_MSIX	2
#define ICE_ESWITCH_MSIX	1
#define ICE_NO_VSI		0xffff
#define ICE_VSI_MAP_CONTIG	0
#define ICE_VSI_MAP_SCATTER	1
#define ICE_MAX_SCATTER_TXQS	16
#define ICE_MAX_SCATTER_RXQS	16
#define ICE_Q_WAIT_RETRY_LIMIT	10
#define ICE_Q_WAIT_MAX_RETRY	(5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS	256
#define ICE_INVAL_Q_INDEX	0xffff

#define ICE_MAX_RXQS_PER_TC	256	/* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC	1

#define ICE_MAX_RESET_WAIT	20

#define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU	(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_MAX_TSO_SIZE	131072

#define ICE_UP_TABLE_TRANSLATE(val, i) \
		(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
		  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
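
/* The ## token pasting selects the per-UP shift/mask pair. For example,
 * ICE_UP_TABLE_TRANSLATE(3, 0) expands to
 * (((3) << ICE_AQ_VSI_UP_TABLE_UP0_S) & ICE_AQ_VSI_UP_TABLE_UP0_M),
 * i.e. TC 3 placed into the UP0 field of the VSI UP translation table.
 */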

#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
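
/* These accessors only cast and index a ring's raw descriptor memory; the
 * caller owns bounds checking and lifetime. An illustrative sketch of
 * filling a Tx descriptor (field names per struct ice_tx_desc):
 *
 *	struct ice_tx_desc *tx_desc = ICE_TX_DESC(tx_ring, i);
 *
 *	tx_desc->buf_addr = cpu_to_le64(dma_addr);
 */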

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT		500
/* The user can specify a BW limit in Kbit/Mbit/Gbit and the OS converts it
 * to bytes per second; use this divisor to convert that value back to Kbps.
 */
#define ICE_BW_KBPS_DIVISOR		125
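
/* Worked example: a 1 Mbit/s limit arrives from the stack as
 * 1000000 / 8 = 125000 bytes/s, and 125000 / ICE_BW_KBPS_DIVISOR = 1000 Kbps,
 * which is the unit the scheduler commands expect.
 */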

/* Default recipes have priority 4 and below, hence priority values between 5..7
 * can be used as filter priority for advanced switch filter (advanced switch
 * filters need new recipe to be created for specified extraction sequence
 * because default recipe extraction sequence does not represent custom
 * extraction)
 */
#define ICE_SWITCH_FLTR_PRIO_QUEUE	7
/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
 * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
 * SYN/FIN/RST))
 */
#define ICE_SWITCH_FLTR_PRIO_RSVD	6
#define ICE_SWITCH_FLTR_PRIO_VSI	5
#define ICE_SWITCH_FLTR_PRIO_QGRP	ICE_SWITCH_FLTR_PRIO_VSI

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i)	\
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
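
/* Usage sketch (illustrative): these iterators only advance the index @i,
 * so the body dereferences the matching array slot itself, e.g.:
 *
 *	int i;
 *
 *	ice_for_each_txq(vsi, i) {
 *		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
 *
 *		if (!tx_ring)
 *			continue;
 *		...
 *	}
 */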

#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
				     ICE_PROMISC_UCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
				     ICE_PROMISC_MCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)

enum ice_feature {
	ICE_F_DSCP,
	ICE_F_PHY_RCLK,
	ICE_F_SMA_CTRL,
	ICE_F_CGU,
	ICE_F_GNSS,
	ICE_F_ROCE_LAG,
	ICE_F_SRIOV_LAG,
	ICE_F_MAX
};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {
	struct list_head list;
	u8 type;
	u16 sw_id;
	u16 base_q;
	u16 num_rxq;
	u16 num_txq;
	u16 vsi_num;
	u8 ena_tc;
	struct ice_aqc_vsi_props info;
	u64 max_tx_rate;
	u64 min_tx_rate;
	atomic_t num_sb_fltr;
	struct ice_vsi *ch_vsi;
};

struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};

struct ice_tc_info {
	u16 qoffset;
	u16 qcount_tx;
	u16 qcount_rx;
	u8 netdev_tc;
};

struct ice_tc_cfg {
	u8 numtc;	/* Total number of enabled TCs */
	u16 ena_tc;	/* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

struct ice_qs_cfg {
	struct mutex *qs_mutex;	/* will be assigned to &pf->avail_q_mutex */
	unsigned long *pf_map;
	unsigned long pf_map_size;
	unsigned int q_count;
	unsigned int scatter_count;
	u16 *vsi_map;
	u16 vsi_map_offset;
	u8 mapping_mode;
};

struct ice_sw {
	struct ice_pf *pf;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
};

enum ice_pf_state {
	ICE_TESTING,
	ICE_DOWN,
	ICE_NEEDS_RESTART,
	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
	ICE_RESET_OICR_RECV,	/* set by driver after rcv reset OICR */
	ICE_PFR_REQ,		/* set by driver */
	ICE_CORER_REQ,		/* set by driver */
	ICE_GLOBR_REQ,		/* set by driver */
	ICE_CORER_RECV,		/* set by OICR handler */
	ICE_GLOBR_RECV,		/* set by OICR handler */
	ICE_EMPR_RECV,		/* set by OICR handler */
	ICE_SUSPENDED,		/* set on module remove path */
	ICE_RESET_FAILED,	/* set by reset/rebuild */
	/* When checking for the PF to be in a nominal operating state, the
	 * bits that are grouped at the beginning of the list need to be
	 * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will
	 * be checked. If you need to add a bit into consideration for nominal
	 * operating state, it must be added before
	 * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
	 * without appropriate consideration.
	 */
	ICE_STATE_NOMINAL_CHECK_BITS,
	ICE_ADMINQ_EVENT_PENDING,
	ICE_MAILBOXQ_EVENT_PENDING,
	ICE_SIDEBANDQ_EVENT_PENDING,
	ICE_MDD_EVENT_PENDING,
	ICE_VFLR_EVENT_PENDING,
	ICE_FLTR_OVERFLOW_PROMISC,
	ICE_VF_DIS,
	ICE_CFG_BUSY,
	ICE_SERVICE_SCHED,
	ICE_SERVICE_DIS,
	ICE_FD_FLUSH_REQ,
	ICE_OICR_INTR_DIS,	/* Global OICR interrupt disabled */
	ICE_MDD_VF_PRINT_PENDING,	/* set when an MDD VF event awaits printing */
	ICE_VF_RESETS_DISABLED,	/* disable resets during ice_remove */
	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_PHY_INIT_COMPLETE,
	ICE_FD_VF_FLUSH_CTX,	/* set at FD Rx IRQ or timeout */
	ICE_AUX_ERR_PENDING,
	ICE_STATE_NBITS		/* must be last */
};

enum ice_vsi_state {
	ICE_VSI_DOWN,
	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_STATE_NBITS	/* must be last */
};

struct ice_vsi_stats {
	struct ice_ring_stats **tx_ring_stats;	/* Tx ring stats array */
	struct ice_ring_stats **rx_ring_stats;	/* Rx ring stats array */
};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	u64 tx_linearize;
	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	u32 tx_restart;
	u32 tx_busy;
	u32 rx_buf_failed;
	u32 rx_page_failed;
	u16 num_q_vectors;
	/* tell if only dynamic irq allocation is allowed */
	bool irq_dyn_alloc;

	enum ice_vsi_type type;
	u16 vsi_num;		/* HW (absolute) index of this VSI */
	u16 idx;		/* software index in pf->vsi[] */

	struct ice_vf *vf;	/* VF associated with this VSI */

	u16 num_gfltr;
	u16 num_bfltr;

	/* RSS config */
	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST	1024
#define ICE_ARFS_LST_MASK	(ICE_MAX_ARFS_LIST - 1)
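	/* Since ICE_MAX_ARFS_LIST is a power of two, a flow hash folds into
	 * a bucket with a single mask, e.g. (illustrative):
	 * idx = skb_get_hash(skb) & ICE_ARFS_LST_MASK;
	 */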
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	u16 max_frame;
	u16 rx_buf_len;

	struct ice_aqc_vsi_props info;		/* VSI properties */
	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */

	/* VSI stats */
	struct rtnl_link_stats64 net_stats;
	struct rtnl_link_stats64 net_stats_prev;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */

	u8 irqs_ready:1;
	u8 current_isup:1;		/* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	struct ice_vsi_vlan_ops inner_vlan_ops;
	struct ice_vsi_vlan_ops outer_vlan_ops;
	u16 num_vlan;

	/* queue information */
	u8 tx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;		/* index in pf->avail_txqs */
	u16 *rxq_map;		/* index in pf->avail_rxqs */
	u16 alloc_txq;		/* Allocated Tx queues */
	u16 num_txq;		/* Used Tx queues */
	u16 alloc_rxq;		/* Allocated Rx queues */
	u16 num_rxq;		/* Used Rx queues */
	u16 req_txq;		/* User requested Tx queues */
	u16 req_rxq;		/* User requested Rx queues */
	u16 num_rx_desc;
	u16 num_tx_desc;
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	/* XDP ring array */
	unsigned long *af_xdp_zc_qps;	/* tracks AF_XDP ZC enabled qps */
	u16 num_xdp_txq;		/* Used XDP queues */
	u8 xdp_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 cnt_q_avail;
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	u16 num_chnl_rxq;
	u16 num_chnl_txq;
	u16 ch_rss_size;
	u16 num_chnl_fltr;
	/* store away RSS size info before configuring ADQ channels so that
	 * it can be used after tc-qdisc delete to restore the RSS settings
	 * as they were before
	 */
	u16 orig_rss_size;
	/* this keeps track of all enabled TCs with and without DCB
	 * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
	 * information
	 */
	u8 all_numtc;
	u16 all_enatc;

	/* store away TC info, to be used for rebuild logic */
	u8 old_numtc;
	u16 old_ena_tc;

	struct ice_channel *ch;

	/* setup back reference, to which aggregator node this VSI
	 * corresponds to
	 */
	struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;

	u16 v_idx;		/* index in the vsi->q_vector array. */
	u16 reg_idx;
	u8 num_ring_rx;		/* total number of Rx rings in vector */
	u8 num_ring_tx;		/* total number of Tx rings in vector */
	u8 wb_on_itr:1;		/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
	struct msi_map irq;
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
	ICE_FLAG_FLTR_SYNC,
	ICE_FLAG_RDMA_ENA,
	ICE_FLAG_RSS_ENA,
	ICE_FLAG_SRIOV_ENA,
	ICE_FLAG_SRIOV_CAPABLE,
	ICE_FLAG_DCB_CAPABLE,
	ICE_FLAG_DCB_ENA,
	ICE_FLAG_FD_ENA,
	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
	ICE_FLAG_PTP,			/* PTP is enabled by software */
	ICE_FLAG_ADV_FEATURES,
	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
	ICE_FLAG_CLS_FLOWER,
	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
	ICE_FLAG_NO_MEDIA,
	ICE_FLAG_FW_LLDP_AGENT,
	ICE_FLAG_MOD_POWER_UNSUPPORTED,
	ICE_FLAG_PHY_FW_LOAD_FAILED,
	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
	ICE_FLAG_LEGACY_RX,
	ICE_FLAG_VF_TRUE_PROMISC_ENA,
	ICE_FLAG_MDD_AUTO_RESET_VF,
	ICE_FLAG_VF_VLAN_PRUNING,
	ICE_FLAG_LINK_LENIENT_MODE_ENA,
	ICE_FLAG_PLUG_AUX_DEV,
	ICE_FLAG_UNPLUG_AUX_DEV,
	ICE_FLAG_MTU_CHANGED,
	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
	ICE_FLAG_DPLL,			/* SyncE/PTP dplls initialized */
	ICE_PF_FLAGS_NBITS		/* must be last */
};

enum ice_misc_thread_tasks {
	ICE_MISC_THREAD_EXTTS_EVENT,
	ICE_MISC_THREAD_TX_TSTAMP,
	ICE_MISC_THREAD_NBITS		/* must be last */
};

struct ice_switchdev_info {
	struct ice_vsi *control_vsi;
	struct ice_vsi *uplink_vsi;
	struct ice_esw_br_offloads *br_offloads;
	bool is_running;
};

struct ice_agg_node {
	u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE	64
	u32 num_vsis;
	u8 valid;
};

struct ice_pf {
	struct pci_dev *pdev;

	struct devlink_region *nvm_region;
	struct devlink_region *sram_region;
	struct devlink_region *devcaps_region;

	/* devlink port data */
	struct devlink_port devlink_port;

	/* OS reserved IRQ details */
	struct msix_entry *msix_entries;
	struct ice_irq_tracker irq_tracker;
	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
	 * MSIX vectors allowed on this PF.
	 */
	u16 sriov_base_vector;
	unsigned long *sriov_irq_bm;	/* bitmap to track irq usage */
	u16 sriov_irq_size;		/* size of the irq_bm bitmap */

	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */

	struct ice_vsi **vsi;		/* VSIs created by the driver */
	struct ice_vsi_stats **vsi_stats;
	struct ice_sw *first_sw;	/* first switch created by firmware */
	u16 eswitch_mode;		/* current mode of eswitch */
	struct ice_vfs vfs;
	DECLARE_BITMAP(features, ICE_F_MAX);
	DECLARE_BITMAP(state, ICE_STATE_NBITS);
	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
	unsigned long serv_tmr_period;
	unsigned long serv_tmr_prev;
	struct timer_list serv_tmr;
	struct work_struct serv_task;
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
	struct mutex adev_mutex;	/* lock to protect aux device access */
	struct mutex lag_mutex;		/* protect ice_lag struct in PF */
	u32 msg_enable;
	struct ice_ptp ptp;
	struct gnss_serial *gnss_serial;
	struct gnss_device *gnss_dev;
	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
	u16 rdma_base_vector;

	/* spinlock to protect the AdminQ wait list */
	spinlock_t aq_wait_lock;
	struct hlist_head aq_wait_list;
	wait_queue_head_t aq_wait_queue;
	bool fw_emp_reset_disabled;

	wait_queue_head_t reset_wait_queue;

	u32 hw_csum_rx_error;
	u32 oicr_err_reg;
	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
	u16 max_pf_txqs;	/* Total Tx queues PF wide */
	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
	u16 num_lan_tx;		/* num LAN Tx queues setup */
	u16 num_lan_rx;		/* num LAN Rx queues setup */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;	/* Core reset count */
	u16 globr_count;	/* Global reset count */
	u16 empr_count;		/* EMP reset count */
	u16 pfr_count;		/* PF reset count */

	u8 wol_ena : 1;		/* software state of WoL */
	u32 wakeup_reason;	/* last wakeup reason */
	struct ice_hw_port_stats stats;
	struct ice_hw_port_stats stats_prev;
	struct ice_hw hw;
	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
	u8 rdma_mode;
	u16 dcbx_cap;
	u32 tx_timeout_count;
	unsigned long tx_timeout_last_recovery;
	u32 tx_timeout_recovery_level;
	char int_name[ICE_INT_NAME_STR_LEN];
	struct auxiliary_device *adev;
	int aux_idx;
	u32 sw_int_count;
	/* count of tc_flower filters specific to channel (aka where filter
	 * action is "hw_tc <tc_num>")
	 */
	u16 num_dmac_chnl_fltrs;
	struct hlist_head tc_flower_fltr_list;

	u64 supported_rxdids;

	__le64 nvm_phy_type_lo;	/* NVM PHY type low */
	__le64 nvm_phy_type_hi;	/* NVM PHY type high */
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag;	/* Link Aggregation information */

	struct ice_switchdev_info switchdev;
	struct ice_esw_br_port *br_port;

#define ICE_INVALID_AGG_NODE_ID		0
#define ICE_PF_AGG_NODE_ID_START	1
#define ICE_MAX_PF_AGG_NODES		32
	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START	65
#define ICE_MAX_VF_AGG_NODES		32
	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
	struct ice_dplls dplls;
};

extern struct workqueue_struct *ice_lag_wq;

struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_cb_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};

/**
 * ice_vector_ch_enabled - check if a vector is channel enabled
 * @qv: pointer to q_vector
 *
 * Return: true if the vector is channel enabled, false otherwise.
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
	return !!qv->ch; /* Enable it to run with TC */
}

/**
 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
 * @pf: Board private structure
 *
 * Return true if this PF should respond to the Tx timestamp interrupt
 * indication in the miscellaneous OICR interrupt handler.
 */
static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
{
	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
}

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_irq.index;
	int itr = ICE_ITR_NONE;
	u32 val;

	/* clear the PBA here, as this function is meant to clean out all
	 * previous interrupts and enable the interrupt
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);
	if (vsi && test_bit(ICE_VSI_DOWN, vsi->state))
		return;
	wr32(hw, GLINT_DYN_CTL(vector), val);
}
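
/* Illustrative call sites: with a VSI and q_vector, the write targets that
 * vector's GLINT_DYN_CTL register; with both NULL it re-arms the "other"
 * interrupt cause (OICR) vector instead:
 *
 *	ice_irq_dynamic_ena(&pf->hw, vsi, q_vector);	// data-path vector
 *	ice_irq_dynamic_ena(&pf->hw, NULL, NULL);	// OICR vector
 */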

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}
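
/* Usage sketch (illustrative; example_ndo_open is hypothetical): any ndo
 * callback can recover the PF from the netdev it is handed:
 *
 *	static int example_ndo_open(struct net_device *netdev)
 *	{
 *		struct ice_pf *pf = ice_netdev_to_pf(netdev);
 *
 *		return test_bit(ICE_DOWN, pf->state) ? -EBUSY : 0;
 *	}
 */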

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
	return !!READ_ONCE(vsi->xdp_prog);
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
	ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}
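
/* Usage sketch (illustrative; helper names hypothetical): a poll routine can
 * branch on pool presence to pick the zero-copy or regular Rx path:
 *
 *	struct xsk_buff_pool *pool = ice_xsk_pool(rx_ring);
 *
 *	work_done = pool ? clean_rx_zc(rx_ring, budget) :
 *			   clean_rx(rx_ring, budget);
 */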

/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets XSK buff pool pointer on XDP ring.
 *
 * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
 * queue id. Reason for doing so is that queue vectors might have assigned more
 * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
 * carries a pointer to one of these XDP rings for its own purposes, such as
 * handling XDP_TX action, therefore we can piggyback here on the
 * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
	struct ice_tx_ring *ring;

	ring = vsi->rx_rings[qid]->xdp_ring;
	if (!ring)
		return;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
		ring->xsk_pool = NULL;
		return;
	}

	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * Returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
	if (pf->vsi)
		return pf->vsi[0];

	return NULL;
}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
	/* In case of port representor return source port VSI. */
	if (np->repr)
		return np->repr->src_vsi;
	else
		return np->vsi;
}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[pf->ctrl_vsi_idx];
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}
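
/* Usage sketch (illustrative): resolving the VSI named by an absolute VSI
 * number, e.g. one carried in a mailbox or AdminQ event:
 *
 *	struct ice_vsi *vsi = ice_find_vsi(pf, vsi_num);
 *
 *	if (!vsi)
 *		return -ENOENT;
 */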

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->switchdev.is_running;
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT	256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH			1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
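
/* Worked example: with base_idx 2, ICE_FD_SB_STAT_IDX(2) = 2 * 256 = 512
 * (the PF's sideband counter) and ICE_FD_CH_STAT_IDX(2) = 513 (the ADQ
 * channel counter within the same block).
 */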

/**
 * ice_is_adq_active - check if any ADQ channels are active
 * @pf: pointer to PF
 *
 * Return: true if ADQ is configured on the main VSI (whose type should be
 * ICE_VSI_PF), as indicated by its numtc value and the ICE_FLAG_TC_MQPRIO
 * flag; false otherwise.
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	/* is ADQ configured */
	if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		return true;

	return false;
}

bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);

enum ice_aq_task_state {
	ICE_AQ_TASK_NOT_PREPARED,
	ICE_AQ_TASK_WAITING,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;
	struct ice_rq_event_info event;
	enum ice_aq_task_state state;
	u16 opcode;
};

void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode);
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);

/**
 * ice_set_rdma_cap - enable RDMA support
 * @pf: PF struct
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	}
}

/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
	/* defer unplug to service task to avoid RTNL lock and
	 * clear PLUG bit so that pending plugs don't interfere
	 */
	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
#endif /* _ICE_H_ */