1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34#include <net/ipv6.h>
35
36#include "cxgb4.h"
37#include "t4_regs.h"
38#include "t4_tcb.h"
39#include "t4_values.h"
40#include "clip_tbl.h"
41#include "l2t.h"
42#include "smt.h"
43#include "t4fw_api.h"
44#include "cxgb4_filter.h"
45
static inline bool is_field_set(u32 val, u32 mask)
{
	return val || mask;
}

static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
{
	return !(conf & conf_mask) && is_field_set(val, mask);
}
55
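/* Send a CPL_SET_TCB_FIELD to update one word of a filter's TCB. The
 * message is sent on the control queue of the filter's ingress port.
 */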
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid, u16 word, u64 mask, u64 val,
			 int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
				QUEUENO_V(adap->sge.fw_evtq.abs_id) |
				NO_REPLY_V(no_reply));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adap, skb);
	return 0;
}
79
/* Set one of the t_flags bits in the TCB. */
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid, unsigned int bit_pos,
			 unsigned int val, int no_reply)
{
	return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
			     (unsigned long long)val << bit_pos, no_reply);
}
89
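/* Build an immediate-data ULP_TX packet carrying a CPL_ABORT_REQ for @tid. */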
90static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
91{
92 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
93 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
94
95 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
96 txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
97 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
98 sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
99 OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
100 abort_req->rsvd0 = htonl(0);
101 abort_req->rsvd1 = 0;
102 abort_req->cmd = CPL_ABORT_NO_RST;
103}
104
105static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
106{
107 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
108 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
109
110 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
111 txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
112 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
113 sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
114 OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
115 abort_rpl->rsvd0 = htonl(0);
116 abort_rpl->rsvd1 = 0;
117 abort_rpl->cmd = CPL_ABORT_NO_RST;
118}
119
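/* Build an immediate-data ULP_TX packet carrying a CPL_SET_TCB_FIELD for
 * the filter's tid, followed by a trailing no-op immediate.
 */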
120static void mk_set_tcb_ulp(struct filter_entry *f,
121 struct cpl_set_tcb_field *req,
122 unsigned int word, u64 mask, u64 val,
123 u8 cookie, int no_reply)
124{
125 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
126 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
127
128 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
129 txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
130 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
131 sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
132 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
133 req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
134 QUEUENO_V(0));
135 req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
136 req->mask = cpu_to_be64(mask);
137 req->val = cpu_to_be64(val);
138 sc = (struct ulptx_idata *)(req + 1);
139 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
140 sc->len = htonl(0);
141}
142
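/* Program the SMT index used for Source MAC rewriting into the filter's
 * TCB and set the CWR t_flag.
 */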
static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
	int err;

	/* do a set-tcb for smac-sel and CWR bit.. */
	err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
			    TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
			    TCB_SMAC_SEL_V(f->smt->idx), 1);
	if (err)
		goto smac_err;

	err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
	if (!err)
		return 0;

smac_err:
	dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
		f->tid, err);
	return err;
}
163
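/* Program the NAT rewrite values (local/foreign IP addresses and ports)
 * into the raw TCB words of an offloaded filter. IPv6 addresses span four
 * consecutive TCB words, IPv4 addresses a single word.
 */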
static void set_nat_params(struct adapter *adap, struct filter_entry *f,
			   unsigned int tid, bool dip, bool sip, bool dp,
			   bool sp)
{
	u8 *nat_lp = (u8 *)&f->fs.nat_lport;
	u8 *nat_fp = (u8 *)&f->fs.nat_fport;

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
				      WORD_MASK, f->fs.nat_lip[15] |
				      f->fs.nat_lip[14] << 8 |
				      f->fs.nat_lip[13] << 16 |
				      (u64)f->fs.nat_lip[12] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
				      WORD_MASK, f->fs.nat_lip[11] |
				      f->fs.nat_lip[10] << 8 |
				      f->fs.nat_lip[9] << 16 |
				      (u64)f->fs.nat_lip[8] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
				      WORD_MASK, f->fs.nat_lip[7] |
				      f->fs.nat_lip[6] << 8 |
				      f->fs.nat_lip[5] << 16 |
				      (u64)f->fs.nat_lip[4] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
				      WORD_MASK, f->fs.nat_lip[3] |
				      f->fs.nat_lip[2] << 8 |
				      f->fs.nat_lip[1] << 16 |
				      (u64)f->fs.nat_lip[0] << 24, 1);
		} else {
			set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
				      WORD_MASK, f->fs.nat_lip[3] |
				      f->fs.nat_lip[2] << 8 |
				      f->fs.nat_lip[1] << 16 |
				      (u64)f->fs.nat_lip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
				      WORD_MASK, f->fs.nat_fip[15] |
				      f->fs.nat_fip[14] << 8 |
				      f->fs.nat_fip[13] << 16 |
				      (u64)f->fs.nat_fip[12] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
				      WORD_MASK, f->fs.nat_fip[11] |
				      f->fs.nat_fip[10] << 8 |
				      f->fs.nat_fip[9] << 16 |
				      (u64)f->fs.nat_fip[8] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
				      WORD_MASK, f->fs.nat_fip[7] |
				      f->fs.nat_fip[6] << 8 |
				      f->fs.nat_fip[5] << 16 |
				      (u64)f->fs.nat_fip[4] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
				      WORD_MASK, f->fs.nat_fip[3] |
				      f->fs.nat_fip[2] << 8 |
				      f->fs.nat_fip[1] << 16 |
				      (u64)f->fs.nat_fip[0] << 24, 1);

		} else {
			set_tcb_field(adap, f, tid,
				      TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
				      WORD_MASK, f->fs.nat_fip[3] |
				      f->fs.nat_fip[2] << 8 |
				      f->fs.nat_fip[1] << 16 |
				      (u64)f->fs.nat_fip[0] << 24, 1);
		}
	}

	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
		      (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
		      1);
}
246
/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
			   struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 fconf, iconf;

	/* Check for unconfigured fields being used. */
	iconf = adapter->params.tp.ingress_config;
	fconf = fs->hash ? adapter->params.tp.filter_mask :
			   adapter->params.tp.vlan_pri_map;

	if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
	    unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
	    unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
	    unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
			fs->mask.ethtype) ||
	    unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
	    unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
			fs->mask.matchtype) ||
	    unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
	    unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
			fs->mask.pfvf_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
			fs->mask.ovlan_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
			fs->mask.encap_vld) ||
	    unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
		return -EOPNOTSUPP;

	/* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
	 * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
	 * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
	 * below. Additionally, since the T4 firmware interface also
	 * carries that overlap, we need to translate any PF/VF
	 * specification into that internal format below.
	 */
	if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	     is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
	    (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
		return -EOPNOTSUPP;
	if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     (iconf & VNIC_F)))
		return -EOPNOTSUPP;
	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
		return -ERANGE;
	fs->mask.pf &= 0x7;
	fs->mask.vf &= 0x7f;

	/* If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/* Don't allow various trivially obvious bogus out-of-range values... */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* T4 doesn't support removing VLAN Tags for loop back filters. */
	if (is_t4(adapter->params.chip) &&
	    fs->action == FILTER_SWITCH &&
	    (fs->newvlan == VLAN_REMOVE ||
	     fs->newvlan == VLAN_REWRITE))
		return -EOPNOTSUPP;

	if (fs->val.encap_vld &&
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return -EOPNOTSUPP;
	return 0;
}
325
326static int get_filter_steerq(struct net_device *dev,
327 struct ch_filter_specification *fs)
328{
329 struct adapter *adapter = netdev2adap(dev);
330 int iq;
331
332 /* If the user has requested steering matching Ingress Packets
333 * to a specific Queue Set, we need to make sure it's in range
334 * for the port and map that into the Absolute Queue ID of the
335 * Queue Set's Response Queue.
336 */
337 if (!fs->dirsteer) {
338 if (fs->iq)
339 return -EINVAL;
340 iq = 0;
341 } else {
342 struct port_info *pi = netdev_priv(dev);
343
344 /* If the iq id is greater than the number of qsets,
345 * then assume it is an absolute qid.
346 */
347 if (fs->iq < pi->nqsets)
348 iq = adapter->sge.ethrxq[pi->first_qset +
349 fs->iq].rspq.abs_id;
350 else
351 iq = fs->iq;
352 }
353
354 return iq;
355}
356
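/* Read a filter's hit counters straight from its TCB in adapter memory.
 * T4 hardware keeps only a packet count, so *bytes is reported as zero
 * on T4.
 */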
static int get_filter_count(struct adapter *adapter, unsigned int fidx,
			    u64 *pkts, u64 *bytes, bool hash)
{
	unsigned int tcb_base, tcbaddr;
	unsigned int word_offset;
	struct filter_entry *f;
	__be64 be64_byte_count;
	int ret;

	tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
	if (is_hashfilter(adapter) && hash) {
		if (tid_out_of_range(&adapter->tids, fidx))
			return -E2BIG;
		f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
		if (!f)
			return -EINVAL;
	} else {
		if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
			      adapter->tids.nhpftids - 1)) &&
		    fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
			return -E2BIG;

		if (fidx < adapter->tids.nhpftids)
			f = &adapter->tids.hpftid_tab[fidx];
		else
			f = &adapter->tids.ftid_tab[fidx -
						    adapter->tids.nhpftids];
		if (!f->valid)
			return -EINVAL;
	}
	tcbaddr = tcb_base + f->tid * TCB_SIZE;

	spin_lock(&adapter->win0_lock);
	if (is_t4(adapter->params.chip)) {
		__be64 be64_count;

		/* T4 doesn't maintain byte counts in hw */
		*bytes = 0;

		/* Get pkts */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_count),
				   (__be32 *)&be64_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = be64_to_cpu(be64_count);
	} else {
		__be32 be32_count;

		/* Get bytes */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_byte_count),
				   &be64_byte_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*bytes = be64_to_cpu(be64_byte_count);

		/* Get pkts */
		word_offset = 6;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be32_count),
				   &be32_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = (u64)be32_to_cpu(be32_count);
	}

out:
	spin_unlock(&adapter->win0_lock);
	return ret;
}
436
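/* Retrieve the packet and byte hit counts for the filter at index @fidx. */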
int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
			      u64 *hitcnt, u64 *bytecnt, bool hash)
{
	struct adapter *adapter = netdev2adap(dev);

	return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}
444
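/* Return true if a rule with priority @prio can be placed at slot @idx
 * without violating the priority ordering of its neighbouring rules.
 */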
445static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
446 u32 prio)
447{
448 struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
449 u32 prev_ftid, next_ftid;
450
451 /* Only insert the rule if both of the following conditions
452 * are met:
453 * 1. The immediate previous rule has priority <= @prio.
454 * 2. The immediate next rule has priority >= @prio.
455 */
456
457 /* High Priority (HPFILTER) region always has higher priority
458 * than normal FILTER region. So, all rules in HPFILTER region
459 * must have prio value <= rules in normal FILTER region.
460 */
461 if (idx < t->nhpftids) {
462 /* Don't insert if there's a rule already present at @idx
463 * in HPFILTER region.
464 */
465 if (test_bit(idx, t->hpftid_bmap))
466 return false;
467
468 next_tab = t->hpftid_tab;
		next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
470 if (next_ftid >= t->nhpftids) {
471 /* No next entry found in HPFILTER region.
472 * See if there's any next entry in normal
473 * FILTER region.
474 */
			next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
476 if (next_ftid >= t->nftids)
477 next_ftid = idx;
478 else
479 next_tab = t->ftid_tab;
480 }
481
482 /* Search for the closest previous filter entry in HPFILTER
483 * region. No need to search in normal FILTER region because
484 * there can never be any entry in normal FILTER region whose
485 * prio value is < last entry in HPFILTER region.
486 */
		prev_ftid = find_last_bit(t->hpftid_bmap, idx);
488 if (prev_ftid >= idx)
489 prev_ftid = idx;
490
491 prev_tab = t->hpftid_tab;
492 } else {
493 idx -= t->nhpftids;
494
495 /* Don't insert if there's a rule already present at @idx
496 * in normal FILTER region.
497 */
498 if (test_bit(idx, t->ftid_bmap))
499 return false;
500
501 prev_tab = t->ftid_tab;
		prev_ftid = find_last_bit(t->ftid_bmap, idx);
503 if (prev_ftid >= idx) {
504 /* No previous entry found in normal FILTER
505 * region. See if there's any previous entry
506 * in HPFILTER region.
507 */
			prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
509 if (prev_ftid >= t->nhpftids)
510 prev_ftid = idx;
511 else
512 prev_tab = t->hpftid_tab;
513 }
514
515 /* Search for the closest next filter entry in normal
516 * FILTER region. No need to search in HPFILTER region
517 * because there can never be any entry in HPFILTER
518 * region whose prio value is > first entry in normal
519 * FILTER region.
520 */
		next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
522 if (next_ftid >= t->nftids)
523 next_ftid = idx;
524
525 next_tab = t->ftid_tab;
526 }
527
528 next_fe = &next_tab[next_ftid];
529
	/* See if the filter entry belongs to an IPv6 rule, which
	 * occupies 4 slots on T5 and 2 slots on T6. Adjust the
	 * reference to the previously inserted filter entry
	 * accordingly.
	 */
535 prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
536 if (!prev_fe->fs.type)
537 prev_fe = &prev_tab[prev_ftid];
538
539 if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
540 (next_fe->valid && next_fe->fs.tc_prio < prio))
541 return false;
542
543 return true;
544}
545
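/* Find a free filter index for a new rule of the given family, honouring
 * the relative priority ordering between the HPFILTER, HASH and normal
 * FILTER regions. Returns the chosen index or -ENOMEM if no suitable
 * slot is available.
 */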
546int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
547 u32 tc_prio)
548{
549 struct adapter *adap = netdev2adap(dev);
550 struct tid_info *t = &adap->tids;
551 u32 bmap_ftid, max_ftid;
552 struct filter_entry *f;
553 unsigned long *bmap;
554 bool found = false;
555 u8 i, cnt, n;
556 int ftid = 0;
557
	/* IPv4 filters occupy 1 slot. IPv6 filters occupy 2 slots on T6
	 * and 4 slots on T5.
	 */
561 n = 1;
562 if (family == PF_INET6) {
563 n++;
564 if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
565 n += 2;
566 }
567
568 /* There are 3 filter regions available in hardware in
569 * following order of priority:
570 *
571 * 1. High Priority (HPFILTER) region (Highest Priority).
572 * 2. HASH region.
573 * 3. Normal FILTER region (Lowest Priority).
574 *
575 * Entries in HPFILTER and normal FILTER region have index
576 * 0 as the highest priority and the rules will be scanned
577 * in ascending order until either a rule hits or end of
578 * the region is reached.
579 *
580 * All HASH region entries have same priority. The set of
581 * fields to match in headers are pre-determined. The same
582 * set of header match fields must be compulsorily specified
583 * in all the rules wanting to get inserted in HASH region.
584 * Hence, HASH region is an exact-match region. A HASH is
585 * generated for a rule based on the values in the
586 * pre-determined set of header match fields. The generated
587 * HASH serves as an index into the HASH region. There can
588 * never be 2 rules having the same HASH. Hardware will
589 * compute a HASH for every incoming packet based on the
590 * values in the pre-determined set of header match fields
591 * and uses it as an index to check if there's a rule
592 * inserted in the HASH region at the specified index. If
593 * there's a rule inserted, then it's considered as a filter
594 * hit. Otherwise, it's a filter miss and normal FILTER region
595 * is scanned afterwards.
596 */
597
	spin_lock_bh(&t->ftid_lock);
599
600 ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
601 max_ftid = t->nftids + t->nhpftids;
602 while (ftid < max_ftid) {
603 if (ftid < t->nhpftids) {
604 /* If the new rule wants to get inserted into
605 * HPFILTER region, but its prio is greater
606 * than the rule with the highest prio in HASH
607 * region, or if there's not enough slots
608 * available in HPFILTER region, then skip
609 * trying to insert this rule into HPFILTER
610 * region and directly go to the next region.
611 */
612 if ((t->tc_hash_tids_max_prio &&
613 tc_prio > t->tc_hash_tids_max_prio) ||
614 (ftid + n) > t->nhpftids) {
615 ftid = t->nhpftids;
616 continue;
617 }
618
619 bmap = t->hpftid_bmap;
620 bmap_ftid = ftid;
621 } else if (hash_en) {
622 /* Ensure priority is >= last rule in HPFILTER
623 * region.
624 */
			ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
626 if (ftid < t->nhpftids) {
627 f = &t->hpftid_tab[ftid];
628 if (f->valid && tc_prio < f->fs.tc_prio)
629 break;
630 }
631
632 /* Ensure priority is <= first rule in normal
633 * FILTER region.
634 */
			ftid = find_first_bit(t->ftid_bmap, t->nftids);
636 if (ftid < t->nftids) {
637 f = &t->ftid_tab[ftid];
638 if (f->valid && tc_prio > f->fs.tc_prio)
639 break;
640 }
641
642 found = true;
643 ftid = t->nhpftids;
644 goto out_unlock;
645 } else {
646 /* If the new rule wants to get inserted into
647 * normal FILTER region, but its prio is less
648 * than the rule with the highest prio in HASH
649 * region, then reject the rule.
650 */
651 if (t->tc_hash_tids_max_prio &&
652 tc_prio < t->tc_hash_tids_max_prio)
653 break;
654
655 if (ftid + n > max_ftid)
656 break;
657
658 bmap = t->ftid_bmap;
659 bmap_ftid = ftid - t->nhpftids;
660 }
661
662 cnt = 0;
663 for (i = 0; i < n; i++) {
664 if (test_bit(bmap_ftid + i, bmap))
665 break;
666 cnt++;
667 }
668
669 if (cnt == n) {
670 /* Ensure the new rule's prio doesn't conflict
671 * with existing rules.
672 */
			if (cxgb4_filter_prio_in_range(t, ftid, n,
						       tc_prio)) {
675 ftid &= ~(n - 1);
676 found = true;
677 break;
678 }
679 }
680
681 ftid += n;
682 }
683
out_unlock:
	spin_unlock_bh(&t->ftid_lock);
686 return found ? ftid : -ENOMEM;
687}
688
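/* Reserve the ftid bitmap slot(s) for a new TCAM filter. IPv6 filters
 * claim a 4-slot (pre-T6) or 2-slot (T6) aligned region.
 */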
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
			  unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->ftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET) {
		__set_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_allocate_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_allocate_region(t->ftid_bmap, fidx, 1);
	}

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->hpftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET)
		__set_bit(fidx, t->hpftid_bmap);
	else
		bitmap_allocate_region(t->hpftid_bmap, fidx, 1);

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
			     unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		__clear_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_release_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_release_region(t->ftid_bmap, fidx, 1);
	}
	spin_unlock_bh(&t->ftid_lock);
}

static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (family == PF_INET)
		__clear_bit(fidx, t->hpftid_bmap);
	else
		bitmap_release_region(t->hpftid_bmap, fidx, 1);

	spin_unlock_bh(&t->ftid_lock);
}
756
757/* Delete the filter at a specified index. */
758static int del_filter_wr(struct adapter *adapter, int fidx)
759{
760 struct fw_filter_wr *fwr;
761 struct filter_entry *f;
762 struct sk_buff *skb;
763 unsigned int len;
764
765 if (fidx < adapter->tids.nhpftids)
766 f = &adapter->tids.hpftid_tab[fidx];
767 else
768 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
769
770 len = sizeof(*fwr);
771
	skb = alloc_skb(len, GFP_KERNEL);
773 if (!skb)
774 return -ENOMEM;
775
776 fwr = __skb_put(skb, len);
	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
778
779 /* Mark the filter as "pending" and ship off the Filter Work Request.
780 * When we get the Work Request Reply we'll clear the pending status.
781 */
782 f->pending = 1;
	t4_mgmt_tx(adapter, skb);
784 return 0;
785}
786
787/* Send a Work Request to write the filter at a specified index. We construct
788 * a Firmware Filter Work Request to have the work done and put the indicated
789 * filter into "pending" mode which will prevent any further actions against
790 * it till we get a reply from the firmware on the completion status of the
791 * request.
792 */
793int set_filter_wr(struct adapter *adapter, int fidx)
794{
795 struct fw_filter2_wr *fwr;
796 struct filter_entry *f;
797 struct sk_buff *skb;
798
799 if (fidx < adapter->tids.nhpftids)
800 f = &adapter->tids.hpftid_tab[fidx];
801 else
802 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
803
	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
805 if (!skb)
806 return -ENOMEM;
807
808 /* If the new filter requires loopback Destination MAC and/or VLAN
809 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
810 * the filter.
811 */
812 if (f->fs.newdmac || f->fs.newvlan) {
813 /* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
816 if (!f->l2t) {
817 kfree_skb(skb);
818 return -ENOMEM;
819 }
820 }
821
822 /* If the new filter requires loopback Source MAC rewriting then
823 * we need to allocate a SMT entry for the filter.
824 */
825 if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
827 if (!f->smt) {
828 if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
830 f->l2t = NULL;
831 }
832 kfree_skb(skb);
833 return -ENOMEM;
834 }
835 }
836
	fwr = __skb_put_zero(skb, sizeof(*fwr));
838
839 /* It would be nice to put most of the following in t4_hw.c but most
840 * of the work is translating the cxgbtool ch_filter_specification
841 * into the Work Request and the definition of that structure is
842 * currently in cxgbtool.h which isn't appropriate to pull into the
843 * common code. We may eventually try to come up with a more neutral
844 * filter specification structure but for now it's easiest to simply
845 * put this fairly direct code in line ...
846 */
847 if (adapter->params.filter2_wr_support)
848 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
849 else
850 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
851 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
852 fwr->tid_to_iq =
853 htonl(FW_FILTER_WR_TID_V(f->tid) |
854 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
855 FW_FILTER_WR_NOREPLY_V(0) |
856 FW_FILTER_WR_IQ_V(f->fs.iq));
857 fwr->del_filter_to_l2tix =
858 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
859 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
860 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
861 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
862 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
863 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
864 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
865 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
866 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
867 f->fs.newvlan == VLAN_REWRITE) |
868 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
869 f->fs.newvlan == VLAN_REWRITE) |
870 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
871 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
872 FW_FILTER_WR_PRIO_V(f->fs.prio) |
873 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
874 fwr->ethtype = htons(f->fs.val.ethtype);
875 fwr->ethtypem = htons(f->fs.mask.ethtype);
876 fwr->frag_to_ovlan_vldm =
877 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
878 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
879 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
880 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
881 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
882 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
883 if (f->fs.newsmac)
884 fwr->smac_sel = f->smt->idx;
885 fwr->rx_chan_rx_rpl_iq =
886 htons(FW_FILTER_WR_RX_CHAN_V(0) |
887 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
888 fwr->maci_to_matchtypem =
889 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
890 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
891 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
892 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
893 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
894 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
895 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
896 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
897 fwr->ptcl = f->fs.val.proto;
898 fwr->ptclm = f->fs.mask.proto;
899 fwr->ttyp = f->fs.val.tos;
900 fwr->ttypm = f->fs.mask.tos;
901 fwr->ivlan = htons(f->fs.val.ivlan);
902 fwr->ivlanm = htons(f->fs.mask.ivlan);
903 fwr->ovlan = htons(f->fs.val.ovlan);
904 fwr->ovlanm = htons(f->fs.mask.ovlan);
905 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
906 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
907 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
908 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
909 fwr->lp = htons(f->fs.val.lport);
910 fwr->lpm = htons(f->fs.mask.lport);
911 fwr->fp = htons(f->fs.val.fport);
912 fwr->fpm = htons(f->fs.mask.fport);
913
914 if (adapter->params.filter2_wr_support) {
915 u8 *nat_lp = (u8 *)&f->fs.nat_lport;
916 u8 *nat_fp = (u8 *)&f->fs.nat_fport;
917
918 fwr->natmode_to_ulp_type =
919 FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
920 ULP_MODE_TCPDDP :
921 ULP_MODE_NONE) |
922 FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
923 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
924 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
925 fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
926 fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
927 }
928
929 /* Mark the filter as "pending" and ship off the Filter Work Request.
930 * When we get the Work Request Reply we'll clear the pending status.
931 */
932 f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
935 return 0;
936}
937
938/* Return an error number if the indicated filter isn't writable ... */
939int writable_filter(struct filter_entry *f)
940{
941 if (f->locked)
942 return -EPERM;
943 if (f->pending)
944 return -EBUSY;
945
946 return 0;
947}
948
/* Delete the filter at the specified index (if valid). This checks for all
 * the common problems with doing this, like the filter being locked or
 * currently pending in another operation.
 */
953int delete_filter(struct adapter *adapter, unsigned int fidx)
954{
955 struct filter_entry *f;
956 int ret;
957
958 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
959 adapter->tids.nhpftids)
960 return -EINVAL;
961
962 if (fidx < adapter->tids.nhpftids)
963 f = &adapter->tids.hpftid_tab[fidx];
964 else
965 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
966 ret = writable_filter(f);
967 if (ret)
968 return ret;
969 if (f->valid)
970 return del_filter_wr(adapter, fidx);
971
972 return 0;
973}
974
975/* Clear a filter and release any of its resources that we own. This also
976 * clears the filter's "pending" status.
977 */
978void clear_filter(struct adapter *adap, struct filter_entry *f)
979{
	struct port_info *pi = netdev_priv(f->dev);
981
982 /* If the new or old filter have loopback rewriting rules then we'll
983 * need to free any existing L2T, SMT, CLIP entries of filter
984 * rule.
985 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	if (f->smt)
		cxgb4_smt_release(f->smt);

	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
		t4_free_encap_mac_filt(adap, pi->viid,
				       f->fs.val.ovlan & 0x1ff, 0);

	if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
		cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
998
999 /* The zeroing of the filter rule below clears the filter valid,
1000 * pending, locked flags, l2t pointer, etc. so it's all we need for
1001 * this operation.
1002 */
1003 memset(f, 0, sizeof(*f));
1004}
1005
1006void clear_all_filters(struct adapter *adapter)
1007{
1008 struct net_device *dev = adapter->port[0];
1009 unsigned int i;
1010
1011 if (adapter->tids.hpftid_tab) {
1012 struct filter_entry *f = &adapter->tids.hpftid_tab[0];
1013
1014 for (i = 0; i < adapter->tids.nhpftids; i++, f++)
1015 if (f->valid || f->pending)
				cxgb4_del_filter(dev, i, &f->fs);
1017 }
1018
1019 if (adapter->tids.ftid_tab) {
1020 struct filter_entry *f = &adapter->tids.ftid_tab[0];
1021 unsigned int max_ftid = adapter->tids.nftids +
1022 adapter->tids.nsftids +
1023 adapter->tids.nhpftids;
1024
1025 /* Clear all TCAM filters */
1026 for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
1027 if (f->valid || f->pending)
				cxgb4_del_filter(dev, i, &f->fs);
1029 }
1030
1031 /* Clear all hash filters */
	if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
1033 struct filter_entry *f;
1034 unsigned int sb;
1035
1036 for (i = adapter->tids.hash_base;
1037 i <= adapter->tids.ntids; i++) {
1038 f = (struct filter_entry *)
1039 adapter->tids.tid_tab[i];
1040
1041 if (f && (f->valid || f->pending))
				cxgb4_del_filter(dev, f->tid, &f->fs);
1043 }
1044
1045 sb = adapter->tids.stid_base;
1046 for (i = 0; i < sb; i++) {
1047 f = (struct filter_entry *)adapter->tids.tid_tab[i];
1048
1049 if (f && (f->valid || f->pending))
				cxgb4_del_filter(dev, f->tid, &f->fs);
1051 }
1052 }
1053}
1054
1055/* Fill up default masks for set match fields. */
1056static void fill_default_mask(struct ch_filter_specification *fs)
1057{
1058 unsigned int lip = 0, lip_mask = 0;
1059 unsigned int fip = 0, fip_mask = 0;
1060 unsigned int i;
1061
1062 if (fs->val.iport && !fs->mask.iport)
1063 fs->mask.iport |= ~0;
1064 if (fs->val.fcoe && !fs->mask.fcoe)
1065 fs->mask.fcoe |= ~0;
1066 if (fs->val.matchtype && !fs->mask.matchtype)
1067 fs->mask.matchtype |= ~0;
1068 if (fs->val.macidx && !fs->mask.macidx)
1069 fs->mask.macidx |= ~0;
1070 if (fs->val.ethtype && !fs->mask.ethtype)
1071 fs->mask.ethtype |= ~0;
1072 if (fs->val.ivlan && !fs->mask.ivlan)
1073 fs->mask.ivlan |= ~0;
1074 if (fs->val.ovlan && !fs->mask.ovlan)
1075 fs->mask.ovlan |= ~0;
1076 if (fs->val.frag && !fs->mask.frag)
1077 fs->mask.frag |= ~0;
1078 if (fs->val.tos && !fs->mask.tos)
1079 fs->mask.tos |= ~0;
1080 if (fs->val.proto && !fs->mask.proto)
1081 fs->mask.proto |= ~0;
1082 if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
1083 fs->mask.pfvf_vld |= ~0;
1084 if (fs->val.pf && !fs->mask.pf)
1085 fs->mask.pf |= ~0;
1086 if (fs->val.vf && !fs->mask.vf)
1087 fs->mask.vf |= ~0;
1088
1089 for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
1090 lip |= fs->val.lip[i];
1091 lip_mask |= fs->mask.lip[i];
1092 fip |= fs->val.fip[i];
1093 fip_mask |= fs->mask.fip[i];
1094 }
1095
1096 if (lip && !lip_mask)
1097 memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
1098
	if (fip && !fip_mask)
		memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));
1101
1102 if (fs->val.lport && !fs->mask.lport)
1103 fs->mask.lport = ~0;
1104 if (fs->val.fport && !fs->mask.fport)
1105 fs->mask.fport = ~0;
1106}
1107
1108static bool is_addr_all_mask(u8 *ipmask, int family)
1109{
1110 if (family == AF_INET) {
1111 struct in_addr *addr;
1112
1113 addr = (struct in_addr *)ipmask;
1114 if (addr->s_addr == htonl(0xffffffff))
1115 return true;
1116 } else if (family == AF_INET6) {
1117 struct in6_addr *addr6;
1118
1119 addr6 = (struct in6_addr *)ipmask;
1120 if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
1121 addr6->s6_addr32[1] == htonl(0xffffffff) &&
1122 addr6->s6_addr32[2] == htonl(0xffffffff) &&
1123 addr6->s6_addr32[3] == htonl(0xffffffff))
1124 return true;
1125 }
1126 return false;
1127}
1128
1129static bool is_inaddr_any(u8 *ip, int family)
1130{
1131 int addr_type;
1132
1133 if (family == AF_INET) {
1134 struct in_addr *addr;
1135
1136 addr = (struct in_addr *)ip;
1137 if (addr->s_addr == htonl(INADDR_ANY))
1138 return true;
1139 } else if (family == AF_INET6) {
1140 struct in6_addr *addr6;
1141
1142 addr6 = (struct in6_addr *)ip;
		addr_type = ipv6_addr_type((const struct in6_addr *)addr6);
1145 if (addr_type == IPV6_ADDR_ANY)
1146 return true;
1147 }
1148 return false;
1149}
1150
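/* Decide whether a filter specification can be installed as a hash
 * (exact-match) filter: the tuple fields must match the hash-filter mask
 * configured in hardware, the addresses and ports must be fully specified,
 * and there must still be room in the hash region.
 */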
1151bool is_filter_exact_match(struct adapter *adap,
1152 struct ch_filter_specification *fs)
1153{
1154 struct tp_params *tp = &adap->params.tp;
1155 u64 hash_filter_mask = tp->hash_filter_mask;
1156 u64 ntuple_mask = 0;
1157
1158 if (!is_hashfilter(adap))
1159 return false;
1160
	if ((atomic_read(&adap->tids.hash_tids_in_use) +
	     atomic_read(&adap->tids.tids_in_use)) >=
1163 (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base)))
1164 return false;
1165
1166 /* Keep tunnel VNI match disabled for hash-filters for now */
1167 if (fs->mask.encap_vld)
1168 return false;
1169
	if (fs->type) {
		if (is_inaddr_any(fs->val.fip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET6))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET6))
			return false;
	} else {
		if (is_inaddr_any(fs->val.fip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET))
			return false;
	}
1187
1188 if (!fs->val.lport || fs->mask.lport != 0xffff)
1189 return false;
1190
1191 if (!fs->val.fport || fs->mask.fport != 0xffff)
1192 return false;
1193
1194 /* calculate tuple mask and compare with mask configured in hw */
1195 if (tp->fcoe_shift >= 0)
1196 ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
1197
1198 if (tp->port_shift >= 0)
1199 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
1200
1201 if (tp->vnic_shift >= 0) {
1202 if ((adap->params.tp.ingress_config & VNIC_F))
1203 ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
1204 else
1205 ntuple_mask |= (u64)fs->mask.ovlan_vld <<
1206 tp->vnic_shift;
1207 }
1208
1209 if (tp->vlan_shift >= 0)
1210 ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
1211
1212 if (tp->tos_shift >= 0)
1213 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
1214
1215 if (tp->protocol_shift >= 0)
1216 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
1217
1218 if (tp->ethertype_shift >= 0)
1219 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
1220
1221 if (tp->macmatch_shift >= 0)
1222 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
1223
1224 if (tp->matchtype_shift >= 0)
1225 ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
1226
1227 if (tp->frag_shift >= 0)
1228 ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
1229
1230 if (ntuple_mask != hash_filter_mask)
1231 return false;
1232
1233 return true;
1234}
1235
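/* Build the compressed filter tuple that serves as the hash-filter key,
 * using only the fields selected by the TP configuration.
 */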
1236static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
1237 struct net_device *dev)
1238{
1239 struct adapter *adap = netdev2adap(dev);
1240 struct tp_params *tp = &adap->params.tp;
1241 u64 ntuple = 0;
1242
1243 /* Initialize each of the fields which we care about which are present
1244 * in the Compressed Filter Tuple.
1245 */
1246 if (tp->vlan_shift >= 0 && fs->mask.ivlan)
1247 ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
1248
1249 if (tp->port_shift >= 0 && fs->mask.iport)
1250 ntuple |= (u64)fs->val.iport << tp->port_shift;
1251
1252 if (tp->protocol_shift >= 0) {
1253 if (!fs->val.proto)
1254 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
1255 else
1256 ntuple |= (u64)fs->val.proto << tp->protocol_shift;
1257 }
1258
1259 if (tp->tos_shift >= 0 && fs->mask.tos)
1260 ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
1261
1262 if (tp->vnic_shift >= 0) {
1263 if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
1264 fs->mask.encap_vld)
1265 ntuple |= (u64)((fs->val.encap_vld << 16) |
1266 (fs->val.ovlan)) << tp->vnic_shift;
1267 else if ((adap->params.tp.ingress_config & VNIC_F) &&
1268 fs->mask.pfvf_vld)
1269 ntuple |= (u64)((fs->val.pfvf_vld << 16) |
1270 (fs->val.pf << 13) |
1271 (fs->val.vf)) << tp->vnic_shift;
1272 else
1273 ntuple |= (u64)((fs->val.ovlan_vld << 16) |
1274 (fs->val.ovlan)) << tp->vnic_shift;
1275 }
1276
1277 if (tp->macmatch_shift >= 0 && fs->mask.macidx)
1278 ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;
1279
1280 if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
1281 ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;
1282
1283 if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
1284 ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;
1285
1286 if (tp->frag_shift >= 0 && fs->mask.frag)
1287 ntuple |= (u64)(fs->val.frag) << tp->frag_shift;
1288
1289 if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
1290 ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
1291 return ntuple;
1292}
1293
1294static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
1295 unsigned int qid_filterid, struct adapter *adap)
1296{
1297 struct cpl_t6_act_open_req6 *t6req = NULL;
1298 struct cpl_act_open_req6 *req = NULL;
1299
	t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
1301 INIT_TP_WR(t6req, 0);
1302 req = (struct cpl_act_open_req6 *)t6req;
1303 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
1304 req->local_port = cpu_to_be16(f->fs.val.lport);
1305 req->peer_port = cpu_to_be16(f->fs.val.fport);
1306 req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
1307 req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
1308 req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
1309 req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
1310 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1311 f->fs.newvlan == VLAN_REWRITE) |
1312 DELACK_V(f->fs.hitcnts) |
1313 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1314 SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1315 0x7F) << 1) |
1316 TX_CHAN_V(f->fs.eport) |
1317 NO_CONG_V(f->fs.rpttid) |
1318 ULP_MODE_V(f->fs.nat_mode ?
1319 ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1320 TCAM_BYPASS_F | NON_OFFLOAD_F);
1321 t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1322 f->dev)));
1323 t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1324 RSS_QUEUE_V(f->fs.iq) |
1325 TX_QUEUE_V(f->fs.nat_mode) |
1326 T5_OPT_2_VALID_F |
1327 RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
1328 PACE_V((f->fs.maskhash) |
1329 ((f->fs.dirsteerhash) << 1)));
1330}
1331
1332static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
1333 unsigned int qid_filterid, struct adapter *adap)
1334{
1335 struct cpl_t6_act_open_req *t6req = NULL;
1336 struct cpl_act_open_req *req = NULL;
1337
	t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
1339 INIT_TP_WR(t6req, 0);
1340 req = (struct cpl_act_open_req *)t6req;
1341 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
1342 req->local_port = cpu_to_be16(f->fs.val.lport);
1343 req->peer_port = cpu_to_be16(f->fs.val.fport);
1344 memcpy(&req->local_ip, f->fs.val.lip, 4);
1345 memcpy(&req->peer_ip, f->fs.val.fip, 4);
1346 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1347 f->fs.newvlan == VLAN_REWRITE) |
1348 DELACK_V(f->fs.hitcnts) |
1349 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1350 SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1351 0x7F) << 1) |
1352 TX_CHAN_V(f->fs.eport) |
1353 NO_CONG_V(f->fs.rpttid) |
1354 ULP_MODE_V(f->fs.nat_mode ?
1355 ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1356 TCAM_BYPASS_F | NON_OFFLOAD_F);
1357
1358 t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1359 f->dev)));
1360 t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1361 RSS_QUEUE_V(f->fs.iq) |
1362 TX_QUEUE_V(f->fs.nat_mode) |
1363 T5_OPT_2_VALID_F |
1364 RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
1365 PACE_V((f->fs.maskhash) |
1366 ((f->fs.dirsteerhash) << 1)));
1367}
1368
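/* Install an exact-match (hash) filter: allocate any L2T/SMT/CLIP/MPS
 * resources it needs and send the CPL_ACT_OPEN_REQ/REQ6 that creates it.
 * The request completes asynchronously; the caller's context, if any, is
 * saved in the filter entry so completion can be signalled later.
 */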
1369static int cxgb4_set_hash_filter(struct net_device *dev,
1370 struct ch_filter_specification *fs,
1371 struct filter_ctx *ctx)
1372{
1373 struct adapter *adapter = netdev2adap(dev);
1374 struct port_info *pi = netdev_priv(dev);
1375 struct tid_info *t = &adapter->tids;
1376 struct filter_entry *f;
1377 struct sk_buff *skb;
1378 int iq, atid, size;
1379 int ret = 0;
1380 u32 iconf;
1381
1382 fill_default_mask(fs);
1383 ret = validate_filter(dev, fs);
1384 if (ret)
1385 return ret;
1386
1387 iq = get_filter_steerq(dev, fs);
1388 if (iq < 0)
1389 return iq;
1390
	f = kzalloc(sizeof(*f), GFP_KERNEL);
1392 if (!f)
1393 return -ENOMEM;
1394
1395 f->fs = *fs;
1396 f->ctx = ctx;
1397 f->dev = dev;
1398 f->fs.iq = iq;
1399
1400 /* If the new filter requires loopback Destination MAC and/or VLAN
1401 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1402 * the filter.
1403 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			ret = -ENOMEM;
			goto free_l2t;
		}
	}

	atid = cxgb4_alloc_atid(t, f);
	if (atid < 0) {
		ret = atid;
		goto free_smt;
	}
1434
1435 iconf = adapter->params.tp.ingress_config;
1436 if (iconf & VNIC_F) {
1437 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1438 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1439 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1440 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	} else if (iconf & USE_ENC_IDX_F) {
		if (f->fs.val.encap_vld) {
			struct port_info *pi = netdev_priv(f->dev);
			static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };

			/* allocate MPS TCAM entry */
			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
						      match_all_mac,
						      match_all_mac,
						      f->fs.val.vni,
						      f->fs.mask.vni,
						      0, 1, 1);
			if (ret < 0)
				goto free_atid;

			f->fs.val.ovlan = ret;
			f->fs.mask.ovlan = 0xffff;
			f->fs.val.ovlan_vld = 1;
			f->fs.mask.ovlan_vld = 1;
		}
	}

	size = sizeof(struct cpl_t6_act_open_req);
	if (f->fs.type) {
		ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
		if (ret)
			goto free_mps;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mk_act_open_req6(f, skb,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_mps;
		}

		mk_act_open_req(f, skb,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;

free_clip:
	cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

free_mps:
	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
		t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);

free_atid:
	cxgb4_free_atid(t, atid);

free_smt:
	if (f->smt) {
		cxgb4_smt_release(f->smt);
		f->smt = NULL;
	}

free_l2t:
	if (f->l2t) {
		cxgb4_l2t_release(f->l2t);
		f->l2t = NULL;
	}

out_err:
	kfree(f);
	return ret;
}
1521
1522/* Check a Chelsio Filter Request for validity, convert it into our internal
1523 * format and send it to the hardware. Return 0 on success, an error number
1524 * otherwise. We attach any provided filter operation context to the internal
1525 * filter specification in order to facilitate signaling completion of the
1526 * operation.
1527 */
1528int __cxgb4_set_filter(struct net_device *dev, int ftid,
1529 struct ch_filter_specification *fs,
1530 struct filter_ctx *ctx)
1531{
1532 struct adapter *adapter = netdev2adap(dev);
1533 unsigned int max_fidx, fidx, chip_ver;
1534 int iq, ret, filter_id = ftid;
1535 struct filter_entry *f, *tab;
1536 u32 iconf;
1537
1538 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1539 if (fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_set_hash_filter(dev, fs, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
1544 return -EINVAL;
1545 }
1546
1547 max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
1548 if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1549 filter_id >= max_fidx)
1550 return -E2BIG;
1551
1552 fill_default_mask(fs);
1553
1554 ret = validate_filter(dev, fs);
1555 if (ret)
1556 return ret;
1557
1558 iq = get_filter_steerq(dev, fs);
1559 if (iq < 0)
1560 return iq;
1561
1562 if (fs->prio) {
1563 tab = &adapter->tids.hpftid_tab[0];
1564 } else {
1565 tab = &adapter->tids.ftid_tab[0];
1566 filter_id = ftid - adapter->tids.nhpftids;
1567 }
1568
1569 /* IPv6 filters occupy four slots and must be aligned on
1570 * four-slot boundaries. IPv4 filters only occupy a single
1571 * slot and have no alignment requirements but writing a new
1572 * IPv4 filter into the middle of an existing IPv6 filter
1573 * requires clearing the old IPv6 filter and hence we prevent
1574 * insertion.
1575 */
1576 if (fs->type == 0) { /* IPv4 */
1577 /* For T6, If our IPv4 filter isn't being written to a
1578 * multiple of two filter index and there's an IPv6
1579 * filter at the multiple of 2 base slot, then we need
1580 * to delete that IPv6 filter ...
1581 * For adapters below T6, IPv6 filter occupies 4 entries.
1582 * Hence we need to delete the filter in multiple of 4 slot.
1583 */
1584 if (chip_ver < CHELSIO_T6)
1585 fidx = filter_id & ~0x3;
1586 else
1587 fidx = filter_id & ~0x1;
1588
1589 if (fidx != filter_id && tab[fidx].fs.type) {
1590 f = &tab[fidx];
1591 if (f->valid) {
1592 dev_err(adapter->pdev_dev,
1593 "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
1594 fidx, fidx + 3);
1595 return -EINVAL;
1596 }
1597 }
1598 } else { /* IPv6 */
1599 if (chip_ver < CHELSIO_T6) {
1600 /* Ensure that the IPv6 filter is aligned on a
1601 * multiple of 4 boundary.
1602 */
1603 if (filter_id & 0x3) {
1604 dev_err(adapter->pdev_dev,
1605 "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
1606 return -EINVAL;
1607 }
1608
1609 /* Check all except the base overlapping IPv4 filter
1610 * slots.
1611 */
1612 for (fidx = filter_id + 1; fidx < filter_id + 4;
1613 fidx++) {
1614 f = &tab[fidx];
1615 if (f->valid) {
1616 dev_err(adapter->pdev_dev,
1617 "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
1618 fidx);
1619 return -EBUSY;
1620 }
1621 }
1622 } else {
1623 /* For T6, CLIP being enabled, IPv6 filter would occupy
1624 * 2 entries.
1625 */
1626 if (filter_id & 0x1)
1627 return -EINVAL;
1628 /* Check overlapping IPv4 filter slot */
1629 fidx = filter_id + 1;
1630 f = &tab[fidx];
1631 if (f->valid) {
1632 pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
1633 __func__, fidx);
1634 return -EBUSY;
1635 }
1636 }
1637 }
1638
1639 /* Check to make sure that provided filter index is not
1640 * already in use by someone else
1641 */
1642 f = &tab[filter_id];
1643 if (f->valid)
1644 return -EBUSY;
1645
	if (fs->prio) {
		fidx = filter_id + adapter->tids.hpftid_base;
		ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
				       fs->type ? PF_INET6 : PF_INET);
	} else {
		fidx = filter_id + adapter->tids.ftid_base;
		ret = cxgb4_set_ftid(&adapter->tids, filter_id,
				     fs->type ? PF_INET6 : PF_INET,
				     chip_ver);
	}
1656
1657 if (ret)
1658 return ret;
1659
	/* Check to make sure the filter requested is writable ... */
1661 ret = writable_filter(f);
1662 if (ret)
1663 goto free_tid;
1664
	if (is_t6(adapter->params.chip) && fs->type &&
	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
	    IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
1669 if (ret)
1670 goto free_tid;
1671 }
1672
1673 /* Convert the filter specification into our internal format.
1674 * We copy the PF/VF specification into the Outer VLAN field
1675 * here so the rest of the code -- including the interface to
1676 * the firmware -- doesn't have to constantly do these checks.
1677 */
1678 f->fs = *fs;
1679 f->fs.iq = iq;
1680 f->dev = dev;
1681
1682 iconf = adapter->params.tp.ingress_config;
1683 if (iconf & VNIC_F) {
1684 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1685 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1686 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1687 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1688 } else if (iconf & USE_ENC_IDX_F) {
1689 if (f->fs.val.encap_vld) {
			struct port_info *pi = netdev_priv(f->dev);
1691 static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1692
1693 /* allocate MPS TCAM entry */
			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
						      match_all_mac,
						      match_all_mac,
						      f->fs.val.vni,
						      f->fs.mask.vni,
						      0, 1, 1);
1700 if (ret < 0)
1701 goto free_tid;
1702
1703 f->fs.val.ovlan = ret;
1704 f->fs.mask.ovlan = 0x1ff;
1705 f->fs.val.ovlan_vld = 1;
1706 f->fs.mask.ovlan_vld = 1;
1707 }
1708 }
1709
1710 /* Attempt to set the filter. If we don't succeed, we clear
1711 * it and return the failure.
1712 */
1713 f->ctx = ctx;
1714 f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(adapter, ftid);
1716 if (ret)
1717 goto free_tid;
1718
1719 return ret;
1720
free_tid:
	if (f->fs.prio)
		cxgb4_clear_hpftid(&adapter->tids, filter_id,
				   fs->type ? PF_INET6 : PF_INET);
	else
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);

	clear_filter(adapter, f);
1731 return ret;
1732}
1733
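/* Remove a hash filter: point its TCB RSS queue at the firmware event
 * queue and abort the TCB, all carried in a single ULP_TX work request.
 */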
1734static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
1735 struct filter_ctx *ctx)
1736{
1737 struct adapter *adapter = netdev2adap(dev);
1738 struct tid_info *t = &adapter->tids;
1739 struct cpl_abort_req *abort_req;
1740 struct cpl_abort_rpl *abort_rpl;
1741 struct cpl_set_tcb_field *req;
1742 struct ulptx_idata *aligner;
1743 struct work_request_hdr *wr;
1744 struct filter_entry *f;
1745 struct sk_buff *skb;
1746 unsigned int wrlen;
1747 int ret;
1748
1749 netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
1750 __func__, filter_id, adapter->tids.nftids);
1751
	if (tid_out_of_range(t, filter_id))
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}
1794
1795/* Check a delete filter request for validity and send it to the hardware.
1796 * Return 0 on success, an error number otherwise. We attach any provided
1797 * filter operation context to the internal filter specification in order to
1798 * facilitate signaling completion of the operation.
1799 */
1800int __cxgb4_del_filter(struct net_device *dev, int filter_id,
1801 struct ch_filter_specification *fs,
1802 struct filter_ctx *ctx)
1803{
1804 struct adapter *adapter = netdev2adap(dev);
1805 unsigned int max_fidx, chip_ver;
1806 struct filter_entry *f;
1807 int ret;
1808
1809 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1810 if (fs && fs->hash) {
1811 if (is_hashfilter(adap: adapter))
1812 return cxgb4_del_hash_filter(dev, filter_id, ctx);
1813 netdev_err(dev, format: "%s: Exact-match filters only supported with Hash Filter configuration\n",
1814 __func__);
1815 return -EINVAL;
1816 }
1817
1818 max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
1819 if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1820 filter_id >= max_fidx)
1821 return -E2BIG;
1822
1823 if (filter_id < adapter->tids.nhpftids)
1824 f = &adapter->tids.hpftid_tab[filter_id];
1825 else
1826 f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
1827
1828 ret = writable_filter(f);
1829 if (ret)
1830 return ret;
1831
1832 if (f->valid) {
1833 f->ctx = ctx;
1834 if (f->fs.prio)
1835 cxgb4_clear_hpftid(t: &adapter->tids,
1836 fidx: f->tid - adapter->tids.hpftid_base,
1837 family: f->fs.type ? PF_INET6 : PF_INET);
1838 else
1839 cxgb4_clear_ftid(t: &adapter->tids,
1840 fidx: f->tid - adapter->tids.ftid_base,
1841 family: f->fs.type ? PF_INET6 : PF_INET,
1842 chip_ver);
1843 return del_filter_wr(adapter, fidx: filter_id);
1844 }
1845
1846 /* If the caller has passed in a Completion Context then we need to
1847 * mark it as a successful completion so they don't stall waiting
1848 * for it.
1849 */
1850 if (ctx) {
1851 ctx->result = 0;
1852 complete(&ctx->completion);
1853 }
1854 return ret;
1855}
1856
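/* Synchronous wrapper around __cxgb4_set_filter(): issue the filter write
 * and wait (up to 10 seconds) for the reply handler to post the completion,
 * then return the result it recorded.
 */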
int cxgb4_set_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}

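/* Synchronous wrapper around __cxgb4_del_filter(): issue the filter
 * deletion and wait (up to 10 seconds) for the completion posted by the
 * reply handler. Quietly succeeds if the adapter is being shut down.
 */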
int cxgb4_del_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
		return 0;

	init_completion(&ctx.completion);

	ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}

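/* Program the TCB-level parts of a hash filter once its tid is known:
 * clear the fields used as hit counters, enable DMAC/SMAC rewrite and
 * VLAN insert/rewrite as requested, and configure the NAT mode.
 */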
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
				struct filter_entry *f)
{
	if (f->fs.hitcnts) {
		set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
			      TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
			      TCB_TIMESTAMP_V(0ULL),
			      1);
		set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
			      TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
			      TCB_RTT_TS_RECENT_AGE_V(0ULL),
			      1);
	}

	if (f->fs.newdmac)
		set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
			      1);

	if (f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE)
		set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
			      1);
	if (f->fs.newsmac)
		configure_filter_smac(adap, f);

	if (f->fs.nat_mode) {
		switch (f->fs.nat_mode) {
		case NAT_MODE_DIP:
			set_nat_params(adap, f, tid, true, false, false, false);
			break;

		case NAT_MODE_DIP_DP:
			set_nat_params(adap, f, tid, true, false, true, false);
			break;

		case NAT_MODE_DIP_DP_SIP:
			set_nat_params(adap, f, tid, true, true, true, false);
			break;
		case NAT_MODE_DIP_DP_SP:
			set_nat_params(adap, f, tid, true, false, true, true);
			break;

		case NAT_MODE_SIP_SP:
			set_nat_params(adap, f, tid, false, true, false, true);
			break;

		case NAT_MODE_DIP_SIP_SP:
			set_nat_params(adap, f, tid, true, true, false, true);
			break;

		case NAT_MODE_ALL:
			set_nat_params(adap, f, tid, true, true, true, true);
			break;

		default:
			pr_err("%s: Invalid NAT mode: %d\n",
			       __func__, f->fs.nat_mode);
			return -EINVAL;
		}
	}
	return 0;
}

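/* Handle the ABORT_RPL for a hash filter deletion: clear the filter state,
 * release the tid and complete any waiting filter operation context.
 */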
void hash_del_filter_rpl(struct adapter *adap,
			 const struct cpl_abort_rpl_rss *rpl)
{
	unsigned int status = rpl->status;
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
		__func__, status, tid);

	f = lookup_tid(t, tid);
	if (!f) {
		dev_err(adap->pdev_dev, "%s:could not find filter entry",
			__func__);
		return;
	}
	ctx = f->ctx;
	f->ctx = NULL;
	clear_filter(adap, f);
	cxgb4_remove_tid(t, 0, tid, 0);
	kfree(f);
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
}

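/* Handle the ACT_OPEN_RPL for a hash filter creation. On success the atid
 * is exchanged for the real tid, the remaining TCB fields and the filter
 * action are programmed; on failure the atid is released and the error
 * (-ENOSPC if the TCAM is full) is reported through the completion context.
 */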
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
	unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
		__func__, tid, ftid, status);

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_err(adap->pdev_dev, "%s:could not find filter entry",
			__func__);
		return;
	}
	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE:
		f->tid = tid;
		f->pending = 0;
		f->valid = 1;
		cxgb4_insert_tid(t, f, f->tid, 0);
		cxgb4_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		if (configure_filter_tcb(adap, tid, f)) {
			clear_filter(adap, f);
			cxgb4_remove_tid(t, 0, tid, 0);
			kfree(f);
			if (ctx) {
				ctx->result = -EINVAL;
				complete(&ctx->completion);
			}
			return;
		}
		switch (f->fs.action) {
		case FILTER_PASS:
			if (f->fs.dirsteer)
				set_tcb_tflag(adap, f, tid,
					      TF_DIRECT_STEER_S, 1, 1);
			break;
		case FILTER_DROP:
			set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
			break;
		case FILTER_SWITCH:
			set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
			break;
		}

		break;

	default:
		if (status != CPL_ERR_TCAM_FULL)
			dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
				__func__, status);

		if (ctx) {
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -ENOSPC;
			else
				ctx->result = -EINVAL;
		}
		clear_filter(adap, f);
		cxgb4_free_atid(t, ftid);
		kfree(f);
	}
	if (ctx)
		complete(&ctx->completion);
}

/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	struct filter_entry *f = NULL;
	unsigned int max_fidx;
	int idx;

	max_fidx = adap->tids.nftids + adap->tids.nsftids;
	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		idx = tid - adap->tids.hpftid_base;
		if (idx < adap->tids.nhpftids) {
			f = &adap->tids.hpftid_tab[idx];
		} else {
			/* Check this in normal filter region */
			idx = tid - adap->tids.ftid_base;
			if (idx >= max_fidx)
				return;
			f = &adap->tids.ftid_tab[idx];
			idx += adap->tids.nhpftids;
		}

		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = TCB_COOKIE_G(rpl->cookie);
		struct filter_ctx *ctx;

		/* Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
			if (ctx)
				ctx->result = 0;
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0; /* async setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->result = 0;
				ctx->tid = idx;
			}
		} else {
			/* Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -EINVAL;
		}
		if (ctx)
			complete(&ctx->completion);
	}
}

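/* Enable hash filter support: adap->params.hash_filter is set only if the
 * register checks below pass; otherwise an error is logged and the feature
 * stays disabled.
 */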
void init_hash_filter(struct adapter *adap)
{
	u32 reg;

	/* On T6, verify the necessary register configs and warn the user in
	 * case of improper config
	 */
	if (is_t6(adap->params.chip)) {
		if (is_offload(adap)) {
			if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
			      & ACTIVEFILTERCOUNTS_F)) {
				dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
				return;
			}
		} else {
			reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
			if (TCAM_ACTV_HIT_G(reg) != 4) {
				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
				return;
			}

			reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
			if (HASH_ACTV_HIT_G(reg) != 4) {
				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
				return;
			}
		}

	} else {
		dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
		return;
	}

	adap->params.hash_filter = 1;
}