1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Copyright(c) 2013 - 2019 Intel Corporation. */ |
3 | |
4 | #ifndef _FM10K_H_ |
5 | #define _FM10K_H_ |
6 | |
7 | #include <linux/types.h> |
8 | #include <linux/etherdevice.h> |
9 | #include <linux/cpumask.h> |
10 | #include <linux/rtnetlink.h> |
11 | #include <linux/if_vlan.h> |
12 | #include <linux/pci.h> |
13 | |
14 | #include "fm10k_pf.h" |
15 | #include "fm10k_vf.h" |
16 | |
17 | #define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */ |
18 | |
19 | #define MAX_QUEUES FM10K_MAX_QUEUES_PF |
20 | |
21 | #define FM10K_MIN_RXD 128 |
22 | #define FM10K_MAX_RXD 4096 |
23 | #define FM10K_DEFAULT_RXD 256 |
24 | |
25 | #define FM10K_MIN_TXD 128 |
26 | #define FM10K_MAX_TXD 4096 |
27 | #define FM10K_DEFAULT_TXD 256 |
28 | #define FM10K_DEFAULT_TX_WORK 256 |
29 | |
30 | #define FM10K_RXBUFFER_256 256 |
31 | #define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256 |
32 | #define FM10K_RXBUFFER_2048 2048 |
33 | #define FM10K_RX_BUFSZ FM10K_RXBUFFER_2048 |
34 | |
35 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
36 | #define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
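/* Illustrative sketch (not a definition from this file): the Rx clean path
 * typically batches buffer allocation and defers the tail write until a full
 * bundle is ready, along the lines of:
 *
 *	if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
 *		fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
 *		cleaned_count = 0;
 *	}
 *
 * where 'cleaned_count' is a local counter in the clean routine.
 */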
37 | |
38 | #define FM10K_MAX_STATIONS 63 |
39 | struct fm10k_l2_accel { |
40 | int size; |
41 | u16 count; |
42 | u16 dglort; |
43 | struct rcu_head rcu; |
44 | struct net_device *macvlan[]; |
45 | }; |
46 | |
47 | enum fm10k_ring_state_t { |
48 | __FM10K_TX_DETECT_HANG, |
49 | __FM10K_HANG_CHECK_ARMED, |
50 | __FM10K_TX_XPS_INIT_DONE, |
51 | /* This must be last and is used to calculate BITMAP size */ |
52 | __FM10K_TX_STATE_SIZE__, |
53 | }; |
54 | |
55 | #define check_for_tx_hang(ring) \ |
56 | test_bit(__FM10K_TX_DETECT_HANG, (ring)->state) |
57 | #define set_check_for_tx_hang(ring) \ |
58 | set_bit(__FM10K_TX_DETECT_HANG, (ring)->state) |
59 | #define clear_check_for_tx_hang(ring) \ |
60 | clear_bit(__FM10K_TX_DETECT_HANG, (ring)->state) |
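/* Assumed usage pattern (illustrative only): a watchdog arms hang detection
 * and the Tx clean path reacts if no progress was made, e.g.
 *
 *	set_check_for_tx_hang(tx_ring);
 *	...
 *	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
 *		fm10k_tx_timeout_reset(interface);
 *
 * fm10k_check_tx_hang() and fm10k_tx_timeout_reset() are declared later in
 * this header.
 */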
61 | |
62 | struct fm10k_tx_buffer { |
63 | struct fm10k_tx_desc *next_to_watch; |
64 | struct sk_buff *skb; |
65 | unsigned int bytecount; |
66 | u16 gso_segs; |
67 | u16 tx_flags; |
68 | DEFINE_DMA_UNMAP_ADDR(dma); |
69 | DEFINE_DMA_UNMAP_LEN(len); |
70 | }; |
71 | |
72 | struct fm10k_rx_buffer { |
73 | dma_addr_t dma; |
74 | struct page *page; |
75 | u32 page_offset; |
76 | }; |
77 | |
78 | struct fm10k_queue_stats { |
79 | u64 packets; |
80 | u64 bytes; |
81 | }; |
82 | |
83 | struct fm10k_tx_queue_stats { |
84 | u64 restart_queue; |
85 | u64 csum_err; |
86 | u64 tx_busy; |
87 | u64 tx_done_old; |
88 | u64 csum_good; |
89 | }; |
90 | |
91 | struct fm10k_rx_queue_stats { |
92 | u64 alloc_failed; |
93 | u64 csum_err; |
94 | u64 errors; |
95 | u64 csum_good; |
96 | u64 switch_errors; |
97 | u64 drops; |
98 | u64 pp_errors; |
99 | u64 link_errors; |
100 | u64 length_errors; |
101 | }; |
102 | |
103 | struct fm10k_ring { |
104 | struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */ |
105 | struct net_device *netdev; /* netdev ring belongs to */ |
106 | struct device *dev; /* device for DMA mapping */ |
107 | struct fm10k_l2_accel __rcu *l2_accel; /* L2 acceleration list */ |
108 | void *desc; /* descriptor ring memory */ |
109 | union { |
110 | struct fm10k_tx_buffer *tx_buffer; |
111 | struct fm10k_rx_buffer *rx_buffer; |
112 | }; |
113 | u32 __iomem *tail; |
114 | DECLARE_BITMAP(state, __FM10K_TX_STATE_SIZE__); |
115 | dma_addr_t dma; /* phys. address of descriptor ring */ |
116 | unsigned int size; /* length in bytes */ |
117 | |
118 | u8 queue_index; /* needed for queue management */ |
119 | u8 reg_idx; /* holds the special value that gets |
120 | * the hardware register offset |
121 | * associated with this ring, which is |
122 | * different for DCB and RSS modes |
123 | */ |
124 | u8 qos_pc; /* priority class of queue */ |
125 | u16 vid; /* default VLAN ID of queue */ |
126 | u16 count; /* amount of descriptors */ |
127 | |
128 | u16 next_to_alloc; |
129 | u16 next_to_use; |
130 | u16 next_to_clean; |
131 | |
132 | struct fm10k_queue_stats stats; |
133 | struct u64_stats_sync syncp; |
134 | union { |
135 | /* Tx */ |
136 | struct fm10k_tx_queue_stats tx_stats; |
137 | /* Rx */ |
138 | struct { |
139 | struct fm10k_rx_queue_stats rx_stats; |
140 | struct sk_buff *skb; |
141 | }; |
142 | }; |
143 | } ____cacheline_internodealigned_in_smp; |
144 | |
145 | struct fm10k_ring_container { |
146 | struct fm10k_ring *ring; /* pointer to linked list of rings */ |
147 | unsigned int total_bytes; /* total bytes processed this int */ |
148 | unsigned int total_packets; /* total packets processed this int */ |
149 | u16 work_limit; /* total work allowed per interrupt */ |
150 | u16 itr; /* interrupt throttle rate value */ |
151 | u8 itr_scale; /* ITR adjustment based on PCI speed */ |
152 | u8 count; /* total number of rings in vector */ |
153 | }; |
154 | |
155 | #define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */ |
156 | #define FM10K_ITR_10K 100 /* 100us */ |
157 | #define FM10K_ITR_20K 50 /* 50us */ |
158 | #define FM10K_ITR_40K 25 /* 25us */ |
159 | #define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */ |
160 | |
161 | #define ITR_IS_ADAPTIVE(itr) (!!(itr & FM10K_ITR_ADAPTIVE)) |
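/* The adaptive flag is stored in the same 16-bit field as the throttle value,
 * which itself fits within FM10K_ITR_MAX. Illustrative (hypothetical) check
 * that strips the flag to recover the raw throttle value:
 *
 *	u16 itr = interface->rx_itr;
 *
 *	if (ITR_IS_ADAPTIVE(itr))
 *		itr &= FM10K_ITR_MAX;
 */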
162 | |
163 | #define FM10K_TX_ITR_DEFAULT FM10K_ITR_40K |
164 | #define FM10K_RX_ITR_DEFAULT FM10K_ITR_20K |
165 | #define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) |
166 | |
167 | static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) |
168 | { |
169 | return &ring->netdev->_tx[ring->queue_index]; |
170 | } |
171 | |
172 | /* iterator for handling rings in ring container */ |
173 | #define fm10k_for_each_ring(pos, head) \ |
174 | for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;) |
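/* Example (illustrative): walking every Tx ring owned by a q_vector, where
 * 'q_vector' is a struct fm10k_q_vector pointer:
 *
 *	struct fm10k_ring *ring;
 *	u64 packets = 0;
 *
 *	fm10k_for_each_ring(ring, q_vector->tx)
 *		packets += ring->stats.packets;
 */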
175 | |
176 | #define MAX_Q_VECTORS 256 |
177 | #define MIN_Q_VECTORS 1 |
178 | enum fm10k_non_q_vectors { |
179 | FM10K_MBX_VECTOR, |
180 | NON_Q_VECTORS |
181 | }; |
182 | |
183 | #define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS) |
184 | |
185 | struct fm10k_q_vector { |
186 | struct fm10k_intfc *interface; |
187 | u32 __iomem *itr; /* pointer to ITR register for this vector */ |
188 | u16 v_idx; /* index of q_vector within interface array */ |
189 | struct fm10k_ring_container rx, tx; |
190 | |
191 | struct napi_struct napi; |
192 | cpumask_t affinity_mask; |
193 | char name[IFNAMSIZ + 9]; |
194 | |
195 | #ifdef CONFIG_DEBUG_FS |
196 | struct dentry *dbg_q_vector; |
197 | #endif /* CONFIG_DEBUG_FS */ |
198 | struct rcu_head rcu; /* to avoid race with update stats on free */ |
199 | |
200 | /* for dynamic allocation of rings associated with this q_vector */ |
201 | struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp; |
202 | }; |
203 | |
204 | enum fm10k_ring_f_enum { |
205 | , |
206 | RING_F_QOS, |
207 | RING_F_ARRAY_SIZE /* must be last in enum set */ |
208 | }; |
209 | |
210 | struct fm10k_ring_feature { |
211 | u16 limit; /* upper limit on feature indices */ |
212 | u16 indices; /* current value of indices */ |
213 | u16 mask; /* Mask used for feature to ring mapping */ |
214 | u16 offset; /* offset to start of feature */ |
215 | }; |
216 | |
217 | struct fm10k_iov_data { |
218 | unsigned int num_vfs; |
219 | unsigned int next_vf_mbx; |
220 | struct rcu_head rcu; |
221 | struct fm10k_vf_info vf_info[]; |
222 | }; |
223 | |
224 | enum fm10k_macvlan_request_type { |
225 | FM10K_UC_MAC_REQUEST, |
226 | FM10K_MC_MAC_REQUEST, |
227 | FM10K_VLAN_REQUEST |
228 | }; |
229 | |
230 | struct fm10k_macvlan_request { |
231 | enum fm10k_macvlan_request_type type; |
232 | struct list_head list; |
233 | union { |
234 | struct fm10k_mac_request { |
235 | u8 addr[ETH_ALEN]; |
236 | u16 glort; |
237 | u16 vid; |
238 | } mac; |
239 | struct fm10k_vlan_request { |
240 | u32 vid; |
241 | u8 vsi; |
242 | } vlan; |
243 | }; |
244 | bool set; |
245 | }; |
246 | |
247 | /* one work queue for entire driver */ |
248 | extern struct workqueue_struct *fm10k_workqueue; |
249 | |
250 | /* The following enumeration contains flags which indicate or enable modified |
251 | * driver behaviors. To avoid race conditions, the flags are stored in |
252 | * a BITMAP in the fm10k_intfc structure. The BITMAP should be accessed using |
253 | * atomic *_bit() operations. |
254 | */ |
255 | enum fm10k_flags_t { |
256 | FM10K_FLAG_RESET_REQUESTED, |
257 | , |
258 | , |
259 | FM10K_FLAG_SWPRI_CONFIG, |
260 | /* __FM10K_FLAGS_SIZE__ is used to calculate the size of |
261 | * interface->flags and must be the last value in this |
262 | * enumeration. |
263 | */ |
264 | __FM10K_FLAGS_SIZE__ |
265 | }; |
266 | |
267 | enum fm10k_state_t { |
268 | __FM10K_RESETTING, |
269 | __FM10K_RESET_DETACHED, |
270 | __FM10K_RESET_SUSPENDED, |
271 | __FM10K_DOWN, |
272 | __FM10K_SERVICE_SCHED, |
273 | __FM10K_SERVICE_REQUEST, |
274 | __FM10K_SERVICE_DISABLE, |
275 | __FM10K_MACVLAN_SCHED, |
276 | __FM10K_MACVLAN_REQUEST, |
277 | __FM10K_MACVLAN_DISABLE, |
278 | __FM10K_LINK_DOWN, |
279 | __FM10K_UPDATING_STATS, |
280 | /* This value must be last and determines the BITMAP size */ |
281 | __FM10K_STATE_SIZE__, |
282 | }; |
283 | |
284 | struct fm10k_intfc { |
285 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
286 | struct net_device *netdev; |
287 | struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */ |
288 | struct pci_dev *pdev; |
289 | DECLARE_BITMAP(state, __FM10K_STATE_SIZE__); |
290 | |
291 | /* Access flag values using atomic *_bit() operations */ |
292 | DECLARE_BITMAP(flags, __FM10K_FLAGS_SIZE__); |
293 | |
294 | int xcast_mode; |
295 | |
296 | /* Tx fast path data */ |
297 | int num_tx_queues; |
298 | u16 tx_itr; |
299 | |
300 | /* Rx fast path data */ |
301 | int num_rx_queues; |
302 | u16 rx_itr; |
303 | |
304 | /* TX */ |
305 | struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp; |
306 | |
307 | u64 restart_queue; |
308 | u64 tx_busy; |
309 | u64 tx_csum_errors; |
310 | u64 alloc_failed; |
311 | u64 rx_csum_errors; |
312 | |
313 | u64 tx_bytes_nic; |
314 | u64 tx_packets_nic; |
315 | u64 rx_bytes_nic; |
316 | u64 rx_packets_nic; |
317 | u64 rx_drops_nic; |
318 | u64 rx_overrun_pf; |
319 | u64 rx_overrun_vf; |
320 | |
321 | /* Debug Statistics */ |
322 | u64 hw_sm_mbx_full; |
323 | u64 hw_csum_tx_good; |
324 | u64 hw_csum_rx_good; |
325 | u64 rx_switch_errors; |
326 | u64 rx_drops; |
327 | u64 rx_pp_errors; |
328 | u64 rx_link_errors; |
329 | u64 rx_length_errors; |
330 | |
331 | u32 tx_timeout_count; |
332 | |
333 | /* RX */ |
334 | struct fm10k_ring *rx_ring[MAX_QUEUES]; |
335 | |
336 | /* Queueing vectors */ |
337 | struct fm10k_q_vector *q_vector[MAX_Q_VECTORS]; |
338 | struct msix_entry *msix_entries; |
339 | int num_q_vectors; /* current number of q_vectors for device */ |
340 | struct fm10k_ring_feature ring_feature[RING_F_ARRAY_SIZE]; |
341 | |
342 | /* SR-IOV information management structure */ |
343 | struct fm10k_iov_data *iov_data; |
344 | |
345 | struct fm10k_hw_stats stats; |
346 | struct fm10k_hw hw; |
347 | /* Mailbox lock */ |
348 | spinlock_t mbx_lock; |
349 | u32 __iomem *uc_addr; |
350 | u32 __iomem *sw_addr; |
351 | u16 msg_enable; |
352 | u16 tx_ring_count; |
353 | u16 rx_ring_count; |
354 | struct timer_list service_timer; |
355 | struct work_struct service_task; |
356 | unsigned long next_stats_update; |
357 | unsigned long next_tx_hang_check; |
358 | unsigned long last_reset; |
359 | unsigned long link_down_event; |
360 | bool host_ready; |
361 | bool lport_map_failed; |
362 | |
363 | u32 reta[FM10K_RETA_SIZE]; |
	u32 rssrk[FM10K_RSSRK_SIZE];
365 | |
366 | /* UDP encapsulation port tracking information */ |
367 | __be16 vxlan_port; |
368 | __be16 geneve_port; |
369 | |
370 | /* MAC/VLAN update queue */ |
371 | struct list_head macvlan_requests; |
372 | struct delayed_work macvlan_task; |
373 | /* MAC/VLAN update queue lock */ |
374 | spinlock_t macvlan_lock; |
375 | |
376 | #ifdef CONFIG_DEBUG_FS |
377 | struct dentry *dbg_intfc; |
378 | #endif /* CONFIG_DEBUG_FS */ |
379 | |
380 | #ifdef CONFIG_DCB |
381 | u8 pfc_en; |
382 | #endif |
383 | u8 rx_pause; |
384 | |
385 | /* GLORT resources in use by PF */ |
386 | u16 glort; |
387 | u16 glort_count; |
388 | |
389 | /* VLAN ID for updating multicast/unicast lists */ |
390 | u16 vid; |
391 | }; |
392 | |
393 | static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) |
394 | { |
	spin_lock(&interface->mbx_lock);
396 | } |
397 | |
398 | static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface) |
399 | { |
	spin_unlock(&interface->mbx_lock);
401 | } |
402 | |
403 | static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface) |
404 | { |
	return spin_trylock(&interface->mbx_lock);
406 | } |
407 | |
408 | /* fm10k_test_staterr - test bits in Rx descriptor status and error fields */ |
409 | static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc, |
410 | const u32 stat_err_bits) |
411 | { |
412 | return rx_desc->d.staterr & cpu_to_le32(stat_err_bits); |
413 | } |
414 | |
415 | /* fm10k_desc_unused - calculate if we have unused descriptors */ |
416 | static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) |
417 | { |
418 | s16 unused = ring->next_to_clean - ring->next_to_use - 1; |
419 | |
420 | return likely(unused < 0) ? unused + ring->count : unused; |
421 | } |
422 | |
423 | #define FM10K_TX_DESC(R, i) \ |
424 | (&(((struct fm10k_tx_desc *)((R)->desc))[i])) |
425 | #define FM10K_RX_DESC(R, i) \ |
426 | (&(((union fm10k_rx_desc *)((R)->desc))[i])) |
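
/* Example (illustrative): fetching the next descriptor to fill on transmit.
 *
 *	struct fm10k_tx_desc *tx_desc =
 *		FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
 */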
427 | |
428 | #define FM10K_MAX_TXD_PWR 14 |
429 | #define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR) |
430 | |
431 | /* Tx Descriptors needed, worst case */ |
432 | #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) |
433 | #define DESC_NEEDED (MAX_SKB_FRAGS + 4) |
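/* Worked example: a 32 KB fragment needs TXD_USE_COUNT(32768) =
 * DIV_ROUND_UP(32768, 16384) = 2 descriptors; DESC_NEEDED then reserves one
 * slot per possible fragment plus a few extra (the "+ 4") as headroom before
 * the queue must be stopped.
 */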
434 | |
435 | enum fm10k_tx_flags { |
436 | /* Tx offload flags */ |
437 | FM10K_TX_FLAGS_CSUM = 0x01, |
438 | }; |
439 | |
440 | /* This structure is stored as little endian values as that is the native |
441 | * format of the Rx descriptor. The ordering of these fields is reversed |
442 | * from the actual ftag header to allow for a single bswap to take care |
443 | * of placing all of the values in network order |
444 | */ |
445 | union fm10k_ftag_info { |
446 | __le64 ftag; |
447 | struct { |
448 | /* dglort and sglort combined into a single 32bit desc read */ |
449 | __le32 glort; |
450 | /* upper 16 bits of VLAN are reserved 0 for swpri_type_user */ |
451 | __le32 vlan; |
452 | } d; |
453 | struct { |
454 | __le16 dglort; |
455 | __le16 sglort; |
456 | __le16 vlan; |
457 | __le16 swpri_type_user; |
458 | } w; |
459 | }; |
460 | |
461 | struct fm10k_cb { |
462 | union { |
463 | __le64 tstamp; |
464 | unsigned long ts_tx_timeout; |
465 | }; |
466 | union fm10k_ftag_info fi; |
467 | }; |
468 | |
469 | #define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb) |
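
/* Example (illustrative): stashing the FTAG destination glort in the skb
 * control block on the transmit path; 'glort' is a hypothetical u16 local.
 *
 *	FM10K_CB(skb)->fi.w.dglort = cpu_to_le16(glort);
 */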
470 | |
471 | /* main */ |
472 | extern char fm10k_driver_name[]; |
473 | int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); |
474 | void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); |
475 | __be16 fm10k_tx_encap_offload(struct sk_buff *skb); |
476 | netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, |
477 | struct fm10k_ring *tx_ring); |
478 | void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); |
479 | u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw); |
480 | bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring); |
481 | void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count); |
482 | |
483 | /* PCI */ |
484 | void fm10k_mbx_free_irq(struct fm10k_intfc *); |
485 | int fm10k_mbx_request_irq(struct fm10k_intfc *); |
486 | void fm10k_qv_free_irq(struct fm10k_intfc *interface); |
487 | int fm10k_qv_request_irq(struct fm10k_intfc *interface); |
488 | int fm10k_register_pci_driver(void); |
489 | void fm10k_unregister_pci_driver(void); |
490 | void fm10k_up(struct fm10k_intfc *interface); |
491 | void fm10k_down(struct fm10k_intfc *interface); |
492 | void fm10k_update_stats(struct fm10k_intfc *interface); |
493 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); |
494 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); |
495 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); |
496 | |
497 | /* Netdev */ |
498 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); |
499 | int fm10k_setup_rx_resources(struct fm10k_ring *); |
500 | int fm10k_setup_tx_resources(struct fm10k_ring *); |
501 | void fm10k_free_rx_resources(struct fm10k_ring *); |
502 | void fm10k_free_tx_resources(struct fm10k_ring *); |
503 | void fm10k_clean_all_rx_rings(struct fm10k_intfc *); |
504 | void fm10k_clean_all_tx_rings(struct fm10k_intfc *); |
505 | void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *, |
506 | struct fm10k_tx_buffer *); |
507 | void fm10k_restore_rx_state(struct fm10k_intfc *); |
508 | void fm10k_reset_rx_state(struct fm10k_intfc *); |
509 | int fm10k_setup_tc(struct net_device *dev, u8 tc); |
510 | int fm10k_open(struct net_device *netdev); |
511 | int fm10k_close(struct net_device *netdev); |
512 | int fm10k_queue_vlan_request(struct fm10k_intfc *interface, u32 vid, |
513 | u8 vsi, bool set); |
514 | int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort, |
515 | const unsigned char *addr, u16 vid, bool set); |
516 | void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface, |
517 | u16 glort, bool vlans); |
518 | |
519 | /* Ethtool */ |
520 | void fm10k_set_ethtool_ops(struct net_device *dev); |
521 | void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir); |
522 | |
523 | /* IOV */ |
524 | s32 fm10k_iov_event(struct fm10k_intfc *interface); |
525 | s32 fm10k_iov_mbx(struct fm10k_intfc *interface); |
526 | void fm10k_iov_suspend(struct pci_dev *pdev); |
527 | int fm10k_iov_resume(struct pci_dev *pdev); |
528 | void fm10k_iov_disable(struct pci_dev *pdev); |
529 | int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs); |
530 | void fm10k_iov_update_stats(struct fm10k_intfc *interface); |
531 | s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); |
532 | int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); |
533 | int fm10k_ndo_set_vf_vlan(struct net_device *netdev, |
534 | int vf_idx, u16 vid, u8 qos, __be16 vlan_proto); |
535 | int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, |
536 | int __always_unused min_rate, int max_rate); |
537 | int fm10k_ndo_get_vf_config(struct net_device *netdev, |
538 | int vf_idx, struct ifla_vf_info *ivi); |
539 | int fm10k_ndo_get_vf_stats(struct net_device *netdev, |
540 | int vf_idx, struct ifla_vf_stats *stats); |
541 | |
542 | /* DebugFS */ |
543 | #ifdef CONFIG_DEBUG_FS |
544 | void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector); |
545 | void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector); |
546 | void fm10k_dbg_intfc_init(struct fm10k_intfc *interface); |
547 | void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface); |
548 | void fm10k_dbg_init(void); |
549 | void fm10k_dbg_exit(void); |
550 | #else |
551 | static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {} |
552 | static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {} |
553 | static inline void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) {} |
554 | static inline void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) {} |
555 | static inline void fm10k_dbg_init(void) {} |
556 | static inline void fm10k_dbg_exit(void) {} |
557 | #endif /* CONFIG_DEBUG_FS */ |
558 | |
559 | /* DCB */ |
560 | #ifdef CONFIG_DCB |
561 | void fm10k_dcbnl_set_ops(struct net_device *dev); |
562 | #else |
563 | static inline void fm10k_dcbnl_set_ops(struct net_device *dev) {} |
564 | #endif |
565 | #endif /* _FM10K_H_ */ |
566 | |