// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"

/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
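	/* arfs_fltr_list is only allocated while aRFS is initialized, so a
	 * non-NULL pointer doubles as the "aRFS is enabled" flag
	 */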
	return !!vsi->arfs_fltr_list;
}

/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}

/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}

/**
 * ice_arfs_del_flow_rules - delete the rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
 * longer being referenced by the aRFS hash table.
 */
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_arfs_add_flow_rules - add the rules passed in to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and add the rules to HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
 * hash table.
 */
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}

/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be
 * used to determine whether an aRFS entry should be removed from the hardware
 * and software structures.
 */
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

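	/* time_in_range64() becomes true once get_jiffies_64() has moved
	 * past time_activated plus the 5 second window, in a jiffies
	 * wrap-safe way
	 */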
	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}

/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired delete the filter from HW. The caller of this function
 * is then expected to program the rules collected on add_list and remove the
 * rules collected on del_list.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
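			/* GFP_ATOMIC: this runs with the bh-disabled
			 * arfs_lock held, so the allocation must not sleep
			 */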
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}

/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* aRFS runs only on the PF VSI, so one pass over its buckets is enough */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);
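	/* HW programming happens outside the lock so Flow Director writes
	 * don't run under the bh-disabled spinlock; the temporary lists own
	 * the entries/pointers collected above
	 */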

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}

/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the tuple
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * returns an aRFS entry on success and NULL on failure
 */
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

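	/* called from the Rx steering path with arfs_lock held, so the
	 * allocation must be atomic; __GFP_NOWARN because a failure here
	 * only means the flow is not accelerated
	 */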
	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
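	/* assign IDs modulo RPS_NO_FILTER, the sentinel the RPS core uses
	 * for "no filter", so a valid ID never collides with it
	 */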
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}

/**
 * ice_arfs_is_perfect_flow_set - check if a perfect (4-tuple) flow is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
 * to check if perfect (4-tuple) flow rules are currently in place by Flow
 * Director.
 */
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* advanced Flow Director disabled, perfect filters always supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}

/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the
 * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
 * if the flow_id already exists in the hash table but the rxq_idx has changed
 * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else
 * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table.
 * If neither of the previous conditions are true then add a new entry in the
 * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
 * added to HW.
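 *
 * Return: the filter ID of the aRFS entry on success, negative error code
 * on failure.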
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* aRFS may have failed to allocate its filter list, so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

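		/* the filter ID is returned to the stack on success and is
		 * later echoed back into rps_may_expire_flow()
		 */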
		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}

/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
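 *
 * Return: 0 on success, -EINVAL on an invalid VSI, -ENOMEM on allocation
 * failure.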
 */
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
 */
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

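	/* assign the list pointer last; ice_is_arfs_active() treats a
	 * non-NULL arfs_fltr_list as "aRFS is ready"
	 */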
	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}

/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}

/**
 * ice_free_cpu_rx_rmap - free any previously set up CPU reverse map
 * @vsi: the VSI to be forwarded to
 */
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}

/**
 * ice_set_cpu_rx_rmap - set up the CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return 0;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

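	/* the reverse map lets the stack translate the CPU an application
	 * runs on into the Rx queue whose interrupt is affine to that CPU
	 */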
	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

	ice_for_each_q_vector(vsi, i)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     vsi->q_vectors[i]->irq.virq)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}

/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}

/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}