/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "efx_common.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
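/* Dispatch to the NIC-type specific enqueue handler.  INDIRECT_CALL_2()
 * compares the function pointer against the two expected targets and, on a
 * match, makes a direct call, avoiding retpoline overhead on this hot path;
 * otherwise it falls back to a normal indirect call.
 */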
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
			       ef100_enqueue_skb, __efx_enqueue_skb,
			       tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
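/* efx_rx_packet() only records the received packet in the channel state
 * (rx_pkt_n_frags/rx_pkt_index); efx_rx_flush_packet() then hands any such
 * pending packet to the NIC-type specific handler.  It is typically called
 * before handling the next RX event and at the end of a NAPI poll.
 */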
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		INDIRECT_CALL_2(channel->efx->type->rx_packet,
				__ef100_rx_packet, __efx_rx_packet,
				channel);
}
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
	if (efx->type->rx_buf_hash_valid)
		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
				       ef100_rx_buf_hash_valid,
				       prefix);
	return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
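/* Illustrative sketch (not part of the API): insert a manual-priority RX
 * filter steering one local MAC address to RX queue 0, then remove it again
 * using the returned ID.  Helper names are those declared in filter.h; error
 * handling is elided.
 *
 *	struct efx_filter_spec spec;
 *	s32 filter_id;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
 *	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, mac_addr);
 *	filter_id = efx_filter_insert_filter(efx, &spec, false);
 *	if (filter_id >= 0)
 *		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, filter_id);
 */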

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   u32 filter_id, struct efx_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
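/* Illustrative sketch (not part of the API): enumerate the manual-priority RX
 * filter IDs, roughly as the ethtool rule-enumeration path does.
 * efx_filter_count_rx_used() sizes the buffer and efx_filter_get_rx_ids()
 * fills it; error handling is elided.
 *
 *	u32 count = efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
 *	u32 *ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
 *	s32 n_ids = ids ? efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
 *						ids, count) : -ENOMEM;
 *
 * Each returned ID can then be passed to efx_filter_get_filter_safe() or
 * efx_filter_remove_id_safe() at the same priority.
 */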

/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
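/* efx->vi_scale is the log2 number of virtual interfaces allocated to each
 * VF, so efx_vf_size() is the number of VIs each VF owns.
 */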
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* We must stop reps (which use our TX) before we stop ourselves. */
	efx_detach_reps(efx);

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
	if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
		netif_device_attach(efx->net_dev);
		if (efx->state == STATE_NET_UP)
			efx_attach_reps(efx);
	}
}

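/* Assert that @sem is held for writing: if the read side can be taken, no
 * writer holds it, so warn and report failure.  down_read_trylock() returns
 * non-zero on success, hence the WARN_ON() on a successful trylock.
 */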
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}

int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush);

#endif /* EFX_EFX_H */