/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "vf.h"
#include "ipsec.h"

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	BIT(IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
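
/* Illustrative arithmetic only (not used by the driver): with
 * IXGBE_MAX_DATA_PER_TXD == 16K, TXD_USE_COUNT(9728) == 1 and
 * TXD_USE_COUNT(32768) == 2.  DESC_NEEDED pads MAX_SKB_FRAGS with
 * headroom, e.g. for the skb's linear data and a context descriptor,
 * so a transmit path can gate on it roughly like:
 *
 *	if (ixgbevf_desc_unused(tx_ring) < DESC_NEEDED)
 *		return NETDEV_TX_BUSY;
 */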

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct ixgbevf_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		struct sk_buff *skb;
		/* XDP uses address ptr on irq_clean */
		void *data;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbevf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct ixgbevf_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbevf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbevf_rx_queue_stats {
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 alloc_rx_page;
	u64 csum_err;
};

enum ixgbevf_ring_state_t {
	__IXGBEVF_RX_3K_BUFFER,
	__IXGBEVF_RX_BUILD_SKB_ENABLED,
	__IXGBEVF_TX_DETECT_HANG,
	__IXGBEVF_HANG_CHECK_ARMED,
	__IXGBEVF_TX_XDP_RING,
	__IXGBEVF_TX_XDP_RING_PRIMED,
};

#define ring_is_xdp(ring) \
		test_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
		set_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
		clear_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)

struct ixgbevf_ring {
	struct ixgbevf_ring *next;
	struct ixgbevf_q_vector *q_vector;	/* backpointer to q_vector */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct device *dev;
	void *desc;			/* descriptor ring memory */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */
	u16 count;			/* number of descriptors */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	union {
		struct ixgbevf_tx_buffer *tx_buffer_info;
		struct ixgbevf_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	struct ixgbevf_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbevf_tx_queue_stats tx_stats;
		struct ixgbevf_rx_queue_stats rx_stats;
	};
	struct xdp_rxq_info xdp_rxq;
	u64 hw_csum_rx_error;
	u8 __iomem *tail;
	struct sk_buff *skb;

	/* holds the special value that gets the hardware register offset
	 * associated with this ring, which is different for DCB and RSS modes
	 */
	u16 reg_idx;
	int queue_index;	/* needed for multiqueue queue management */
} ____cacheline_internodealigned_in_smp;

/* How many Rx buffers do we bundle into one write to the hardware? */
#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define MAX_RX_QUEUES	IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES	IXGBE_VF_MAX_TX_QUEUES
#define MAX_XDP_QUEUES	IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES		2
#define IXGBEVF_82599_RETA_SIZE		128	/* 128 entries */
#define IXGBEVF_X550_VFRETA_SIZE	64	/* 64 entries */
#define IXGBEVF_RSS_HASH_KEY_SIZE	40
#define IXGBEVF_VFRSSRK_REGS		10	/* 10 registers for RSS key */

#define IXGBEVF_DEFAULT_TXD	1024
#define IXGBEVF_DEFAULT_RXD	512
#define IXGBEVF_MAX_TXD		4096
#define IXGBEVF_MIN_TXD		64
#define IXGBEVF_MAX_RXD		4096
#define IXGBEVF_MIN_RXD		64

/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
#define IXGBEVF_RXBUFFER_2048	2048
#define IXGBEVF_RXBUFFER_3072	3072

#define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256

#define MAXIMUM_ETHERNET_VLAN_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define IXGBEVF_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IXGBEVF_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
#else
#define IXGBEVF_MAX_FRAME_BUILD_SKB	IXGBEVF_RXBUFFER_2048
#endif

#define IXGBE_TX_FLAGS_CSUM		BIT(0)
#define IXGBE_TX_FLAGS_VLAN		BIT(1)
#define IXGBE_TX_FLAGS_TSO		BIT(2)
#define IXGBE_TX_FLAGS_IPV4		BIT(3)
#define IXGBE_TX_FLAGS_IPSEC		BIT(4)
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

#define ring_uses_large_buffer(ring) \
	test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
#define set_ring_uses_large_buffer(ring) \
	set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
#define clear_ring_uses_large_buffer(ring) \
	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)

#define ring_uses_build_skb(ring) \
	test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
#define set_ring_build_skb_enabled(ring) \
	set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
#define clear_ring_build_skb_enabled(ring) \
	clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)

static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IXGBEVF_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IXGBEVF_MAX_FRAME_BUILD_SKB;
#endif
	return IXGBEVF_RXBUFFER_2048;
}

static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
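
/* Sketch of the resulting sizes, assuming PAGE_SIZE == 4096: a ring
 * with __IXGBEVF_RX_3K_BUFFER set hands out IXGBEVF_RXBUFFER_3072
 * buffers, so ixgbevf_rx_pg_order() returns 1 and
 * ixgbevf_rx_pg_size() == 8192; otherwise order-0 pages are used and
 * ixgbevf_rx_pg_size() == 4096.
 */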

#define check_for_tx_hang(ring) \
	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)

struct ixgbevf_ring_container {
	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbevf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
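
/* Usage sketch; process_tx_ring() is a hypothetical per-ring handler:
 *
 *	struct ixgbevf_ring *ring;
 *
 *	ixgbevf_for_each_ring(ring, q_vector->tx)
 *		process_tx_ring(ring);
 */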

/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbevf_q_vector {
	struct ixgbevf_adapter *adapter;
	/* index of q_vector within array, also used for finding the bit in
	 * EICR and friends that represents the vector for this ring
	 */
	u16 v_idx;
	u16 itr;	/* Interrupt throttle rate written to EITR */
	struct napi_struct napi;
	struct ixgbevf_ring_container rx, tx;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define IXGBEVF_QV_STATE_IDLE		0
#define IXGBEVF_QV_STATE_NAPI		1    /* NAPI owns this QV */
#define IXGBEVF_QV_STATE_POLL		2    /* poll owns this QV */
#define IXGBEVF_QV_STATE_DISABLED	4    /* QV is disabled */
#define IXGBEVF_QV_OWNED	(IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
#define IXGBEVF_QV_LOCKED	(IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD	8    /* NAPI yielded this QV */
#define IXGBEVF_QV_STATE_POLL_YIELD	16   /* poll yielded this QV */
#define IXGBEVF_QV_YIELD	(IXGBEVF_QV_STATE_NAPI_YIELD | \
				 IXGBEVF_QV_STATE_POLL_YIELD)
#define IXGBEVF_QV_USER_PEND	(IXGBEVF_QV_STATE_POLL | \
				 IXGBEVF_QV_STATE_POLL_YIELD)
	spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

/* microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336

/* Helper macros to switch between ints/sec and what the register uses.
 * And yes, it's the same math going both ways.  The lowest value
 * supported by all of the ixgbe hardware is 8.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
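
/* Worked example of the symmetric conversion: a request for 100000
 * ints/sec gives EITR_INTS_PER_SEC_TO_REG(100000)
 * == 1000000000 / (100000 * 256) == 39, and converting back,
 * EITR_REG_TO_INTS_PER_SEC(39) == 1000000000 / (39 * 256) == 100160
 * ints/sec -- the same integer math in both directions, with 8
 * substituted when the argument is 0.
 */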

/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
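
/* Typical use on the Rx clean path, e.g. testing the descriptor-done
 * bit (IXGBE_RXD_STAT_DD, from the adapter's register definitions)
 * before reading a descriptor:
 *
 *	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		break;
 */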

static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
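
/* Example with count == 512: next_to_clean == 10 and
 * next_to_use == 500 give 512 + 10 - 500 - 1 == 21 unused entries;
 * with next_to_clean == 500 and next_to_use == 10 the wrap term drops
 * out and 500 - 10 - 1 == 489.  The trailing -1 keeps next_to_use
 * from ever advancing onto next_to_clean.
 */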

static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
{
	writel(value, ring->tail);
}

#define IXGBEVF_RX_DESC(R, i) \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_DESC(R, i) \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_CTXTDESC(R, i) \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
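
/* Example: fetching the descriptor for the next entry to clean, a
 * common pattern in the Rx hot path:
 *
 *	union ixgbe_adv_rx_desc *rx_desc =
 *		IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 */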

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */

#define OTHER_VECTOR	1
#define NON_Q_VECTORS	(OTHER_VECTOR)

#define MAX_MSIX_Q_VECTORS	2

#define MIN_MSIX_Q_VECTORS	1
#define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

#define IXGBEVF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
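
/* These attributes are intended for the *_attrs DMA API, so the driver
 * can manage cache syncs itself and allow weakly ordered DMA.  Mapping
 * sketch for a freshly allocated Rx page (illustrative only):
 *
 *	dma_addr_t dma = dma_map_page_attrs(rx_ring->dev, page, 0,
 *					    ixgbevf_rx_pg_size(rx_ring),
 *					    DMA_FROM_DEVICE,
 *					    IXGBEVF_RX_DMA_ATTR);
 */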

/* board specific private data structure */
struct ixgbevf_adapter {
	/* this field must be first, see ixgbevf_process_skb_fields */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

	/* Interrupt Throttle Rate */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	/* interrupt masks */
	u32 eims_enable_mask;
	u32 eims_other;

	/* XDP */
	int num_xdp_queues;
	struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES];

	/* TX */
	int num_tx_queues;
	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
	u64 restart_queue;
	u32 tx_timeout_count;
	u64 tx_ipsec;

	/* RX */
	int num_rx_queues;
	struct ixgbevf_ring *rx_ring[MAX_RX_QUEUES]; /* One per active queue */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	int num_msix_vectors;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 alloc_rx_page;
	u64 rx_ipsec;

	struct msix_entry *msix_entries;

	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;

	/* structs defined in ixgbe_vf.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	/* Interrupt Throttle Rate */
	u32 eitr_param;

	struct ixgbevf_hw_stats stats;

	unsigned long state;
	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;

	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 link_speed;
	bool link_up;

	struct timer_list service_timer;
	struct work_struct service_task;

	spinlock_t mbx_lock;
	unsigned long last_reset;

	u32 *rss_key;
	u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
	u32 flags;
	bool link_state;

#define IXGBEVF_FLAGS_LEGACY_RX	BIT(1)

#ifdef CONFIG_XFRM
	struct ixgbevf_ipsec *ipsec;
#endif /* CONFIG_XFRM */
};

enum ixgbevf_state_t {
	__IXGBEVF_TESTING,
	__IXGBEVF_RESETTING,
	__IXGBEVF_DOWN,
	__IXGBEVF_DISABLED,
	__IXGBEVF_REMOVING,
	__IXGBEVF_SERVICE_SCHED,
	__IXGBEVF_SERVICE_INITED,
	__IXGBEVF_RESET_REQUESTED,
	__IXGBEVF_QUEUE_RESET_REQUESTED,
};

enum ixgbevf_boards {
	board_82599_vf,
	board_82599_vf_hv,
	board_X540_vf,
	board_X540_vf_hv,
	board_X550_vf,
	board_X550_vf_hv,
	board_X550EM_x_vf,
	board_X550EM_x_vf_hv,
	board_x550em_a_vf,
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};

extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;

extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;

/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];

int ixgbevf_open(struct net_device *netdev);
int ixgbevf_close(struct net_device *netdev);
void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);

extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);

#ifdef CONFIG_IXGBEVF_IPSEC
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb);
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd);
#else
static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { }
static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb) { }
static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
				   struct ixgbevf_tx_buffer *first,
				   struct ixgbevf_ipsec_tx_data *itd)
{ return 0; }
#endif /* CONFIG_IXGBEVF_IPSEC */

#define ixgbevf_hw_to_netdev(hw) \
	(((struct ixgbevf_adapter *)(hw)->back)->netdev)

#define hw_dbg(hw, format, arg...) \
	netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
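
/* Example: hw_dbg(hw, "mailbox read failed: %d\n", err); routes the
 * message through netdev_dbg() on the VF's net_device, which hw->back
 * reaches via the owning adapter.
 */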

s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);

#endif /* _IXGBEVF_H_ */