/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries. This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary. And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA. However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between the kernel-provided method (0) and the driver's method (1) of selecting the TX queue. Default is the kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);

static int cfg_queues(struct adapter *adap);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

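/*
 * Recompute the unicast/multicast hash filter vector from the adapter's
 * MAC hash list and program it for this port's Virtual Interface.
 */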
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

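/*
 * __dev_uc_sync()/__dev_mc_sync() callback: program a filter for @mac_addr,
 * tracking it in the adapter's MAC hash list when the firmware places it in
 * the hash region rather than an exact-match entry.
 */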
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	/* idx stores the index of allocated filters,
	 * its size should be modified based on the number of
	 * MAC addresses that we allocate filters for
	 */

	u16 idx[1] = {};
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
				   idx, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to the hash addr list
	 * so that at the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

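/*
 * __dev_uc_sync()/__dev_mc_sync() callback: remove the filter for @mac_addr,
 * either by dropping it from the hash list and reprogramming the hash
 * vector, or by freeing its exact-match entry.
 */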
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
			     mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 * cxgb4_change_mac - Update match filter for a MAC address.
 * @pi: the port_info
 * @viid: the VI id
 * @tcam_idx: TCAM index of existing filter for old value of MAC address,
 *	      or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @smt_idx: the destination to store the new SMT index.
 *
 * Modifies an MPS filter and sets it to the new MAC address if
 * @tcam_idx >= 0, or adds the MAC address to a new filter if
 * @tcam_idx < 0. In the latter case the address is added persistently
 * if @persist is %true.
 * If the TCAM runs out of entries, the address is programmed into the
 * hash region instead.
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries. Try programming the hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}

/*
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->mbox;
	int ret;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
			    dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++; /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
				   : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
					& FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

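/* Release whichever of MSI-X or MSI the adapter is currently using. */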
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & CXGB4_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSIX;
	} else if (adapter->flags & CXGB4_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

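/*
 * Allocate an affinity mask containing one CPU spread from the device's
 * local NUMA node and install it as the affinity hint for the given
 * MSI-X vector.
 */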
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{
	int rv;

	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
			*aff_mask);

	rv = irq_set_affinity_hint(vec, *aff_mask);
	if (rv)
		dev_warn(adap->pdev_dev,
			 "irq_set_affinity_hint %u failed %d\n",
			 vec, rv);

	return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
	irq_set_affinity_hint(vec, NULL);
	free_cpumask_var(aff_mask);
}

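/*
 * Request the MSI-X vectors for the firmware event queue and every Ethernet
 * Rx queue, setting CPU affinity for the latter and unwinding on failure.
 */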
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int err, ethqidx;

	if (s->fwevtq_msix_idx < 0)
		return -ENOMEM;

	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
			  t4_sge_intr_msix, 0,
			  adap->msix_info[s->fwevtq_msix_idx].desc,
			  &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		minfo = s->ethrxq[ethqidx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, ethqidx);
	}
	return 0;

unwind:
	while (--ethqidx >= 0) {
		minfo = s->ethrxq[ethqidx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
	}
	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int i;

	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	for_each_ethrxq(s, i) {
		minfo = s->ethrxq[i].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[i].rspq);
	}
}

static int setup_ppod_edram(struct adapter *adap)
{
	unsigned int param, val;
	int ret;

	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
	 * if firmware supports ppod edram feature or not. If firmware
	 * returns 1, then driver can enable this feature by sending
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
	 * enable ppod edram feature.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_warn(adap->pdev_dev,
			 "querying PPOD_EDRAM support failed: %d\n",
			 ret);
		return -1;
	}

	if (val != 1)
		return -1;

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"setting PPOD_EDRAM failed: %d\n", ret);
		return -1;
	}
	return 0;
}

static void adap_config_hpfilter(struct adapter *adapter)
{
	u32 param, val = 0;
	int ret;

	/* Enable HP filter region. Older fw will fail this request and
	 * it is fine.
	 */
	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* An error means FW doesn't know about HP filter support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0)
		dev_err(adapter->pdev_dev,
			"HP filter region isn't supported by FW\n");
}

static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
			    u16 rss_size, u16 viid)
{
	struct adapter *adap = pi->adapter;
	int ret;

	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
				  rss_size);
	if (ret)
		return ret;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed. We'll
	 * use our first ingress queue ...
	 */
	return t4_config_vi_rss(adap, adap->mbox, viid,
				FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				rss[0]);
}

/**
 * cxgb4_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 * Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	int i, err;
	u16 *rss;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
	kfree(rss);
	return err;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (q->handler)
		napi_disable(&q->napi);
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & CXGB4_USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[s->nd_msix_idx].vec,
				 adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

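/* Reserve an MSI-X vector for the adapter's non-data interrupts. */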
static int setup_non_data_intr(struct adapter *adap)
{
	int msix;

	adap->sge.nd_msix_idx = -1;
	if (!(adap->flags & CXGB4_USING_MSIX))
		return 0;

	/* Request MSI-X vector for non-data interrupt */
	msix = cxgb4_get_msix_idx_from_bmap(adap);
	if (msix < 0)
		return -ENOMEM;

	snprintf(adap->msix_info[msix].desc,
		 sizeof(adap->msix_info[msix].desc),
		 "%s", adap->port[0]->name);

	adap->sge.nd_msix_idx = msix;
	return 0;
}

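/*
 * Allocate the FW event queue and, in INTx/MSI mode, the interrupt queue
 * that its events are forwarded through.
 */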
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int msix, err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & CXGB4_USING_MSIX) {
		s->fwevtq_msix_idx = -1;
		msix = cxgb4_get_msix_idx_from_bmap(adap);
		if (msix < 0)
			return -ENOMEM;

		snprintf(adap->msix_info[msix].desc,
			 sizeof(adap->msix_info[msix].desc),
			 "%s-FWeventq", adap->port[0]->name);
	} else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msix = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msix, NULL, fwevtq_handler, NULL, -1);
	if (err && msix >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, msix);

	s->fwevtq_msix_idx = msix;
	return err;
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info = NULL;
	struct sge *s = &adap->sge;
	unsigned int cmplqid = 0;
	int err, i, j, msix = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)s->intrq.abs_id + 1);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msix >= 0) {
				msix = cxgb4_get_msix_idx_from_bmap(adap);
				if (msix < 0) {
					err = msix;
					goto freeout;
				}

				snprintf(adap->msix_info[msix].desc,
					 sizeof(adap->msix_info[msix].desc),
					 "%s-Rx%d", dev->name, j);
				q->msix = &adap->msix_info[msix];
			}

			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msix, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}

		q = &s->ethrxq[pi->first_qset];
		for (j = 0; j < pi->nqsets; j++, t++, q++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
						   netdev_get_tx_queue(dev, j),
						   q->rspq.cntxt_id,
						   !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id, false);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (dev->num_tc) {
		struct port_info *pi = netdev2pinfo(dev);
		u8 ver, proto;

		ver = ip_hdr(skb)->version;
		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
				     ip_hdr(skb)->protocol;

		/* Send unsupported traffic pattern to normal NIC queues. */
		txq = netdev_pick_tx(dev, skb, sb_dev);
		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
		    skb->encapsulation ||
		    tls_is_skb_tx_device_offloaded(skb) ||
		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
			txq = txq % pi->nqsets;

		return txq;
	}

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

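/* Return the index of the SGE holdoff timer value closest to @time. */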
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

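/* Return the index of the SGE packet-count threshold closest to @thres. */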
static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

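/*
 * Apply a netdev features change: only VLAN Rx tag stripping
 * (NETIF_F_HW_VLAN_CTAG_RX) requires reprogramming the hardware here;
 * other feature changes need no action in this handler.
 */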
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	const struct port_info *pi = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
			    pi->viid_mirror, -1, -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

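/* Quiesce and free a single mirror Rx queue, releasing its MSI-X vector. */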
static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
				       struct sge_eth_rxq *mirror_rxq)
{
	if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
	    !(adap->flags & CXGB4_SHUTTING_DOWN))
		cxgb4_quiesce_rx(&mirror_rxq->rspq);

	if (adap->flags & CXGB4_USING_MSIX) {
		cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
				     mirror_rxq->msix->aff_mask);
		free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
		cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
	}

	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
}

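/*
 * Allocate and start the mirror Rx queues for a port and point the mirror
 * VI's RSS table at them.
 */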
static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eth_rxq *mirror_rxq;
	struct sge *s = &adap->sge;
	int ret = 0, msix = 0;
	u16 i, rxqid;
	u16 *rss;

	if (!pi->vi_mirror_count)
		return 0;

	if (s->mirror_rxq[pi->port_id])
		return 0;

	mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
	if (!mirror_rxq)
		return -ENOMEM;

	s->mirror_rxq[pi->port_id] = mirror_rxq;

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
		mirror_rxq = &s->mirror_rxq[pi->port_id][i];

		/* Allocate Mirror Rxqs */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			mirror_rxq->msix = &adap->msix_info[msix];
			snprintf(mirror_rxq->msix->desc,
				 sizeof(mirror_rxq->msix->desc),
				 "%s-mirrorrxq%d", dev->name, i);
		}

		init_rspq(adap, &mirror_rxq->rspq,
			  CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);

		mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
				       dev, msix, &mirror_rxq->fl,
				       t4_ethrx_handler, NULL, 0);
		if (ret)
			goto out_free_msix_idx;

		/* Setup MSI-X vectors for Mirror Rxqs */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(mirror_rxq->msix->vec,
					  t4_sge_intr_msix, 0,
					  mirror_rxq->msix->desc,
					  &mirror_rxq->rspq);
			if (ret)
				goto out_free_rxq;

			cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
					   &mirror_rxq->msix->aff_mask, i);
		}

		/* Start NAPI for Mirror Rxqs */
		cxgb4_enable_rx(adap, &mirror_rxq->rspq);
	}

	/* Setup RSS for Mirror Rxqs */
	rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss) {
		ret = -ENOMEM;
		goto out_free_queues;
	}

	mirror_rxq = &s->mirror_rxq[pi->port_id][0];
	for (i = 0; i < pi->rss_size; i++)
		rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;

	ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
	kfree(rss);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_rxq:
	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);

out_free_msix_idx:
	cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);

out_free_queues:
	while (rxqid-- > 0)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][rxqid]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
	return ret;
}

static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge *s = &adap->sge;
	u16 i;

	if (!pi->vi_mirror_count)
		return;

	if (!s->mirror_rxq[pi->port_id])
		return;

	for (i = 0; i < pi->nmirrorqsets; i++)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][i]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
}

static int cxgb4_port_mirror_start(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, idx = -1;

	if (!pi->vi_mirror_count)
		return 0;

	/* Mirror VIs can be created dynamically after the stack has
	 * already set up Rx modes like MTU, promisc, allmulti, etc.
	 * on the main VI. So, parse what the stack set up on the
	 * main VI and update the same on the mirror VI.
	 */
	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
			    dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed to start up Rx mode for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enable replication bit for the device's MAC address
	 * in MPS TCAM, so that the packets for the main VI are
	 * replicated to the mirror VI.
	 */
	ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
				    dev->dev_addr, true, NULL);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enabling a Virtual Interface can result in an interrupt
	 * during the processing of the VI Enable command and, in some
	 * paths, result in an attempt to issue another command in the
	 * interrupt context. Thus, we disable interrupts during the
	 * course of the VI Enable command ...
	 */
	local_bh_disable();
	ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
				  false);
	local_bh_enable();
	if (ret)
		dev_err(adap->pdev_dev,
			"Failed starting Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);

	return ret;
}

static void cxgb4_port_mirror_stop(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!pi->vi_mirror_count)
		return;

	t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
			    false);
}

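/*
 * Create the port's mirror VI, or take another reference if one already
 * exists; when the adapter is fully initialized, also bring up the mirror
 * queues and Rx mode.
 */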
1513 | int cxgb4_port_mirror_alloc(struct net_device *dev) |
1514 | { |
1515 | struct port_info *pi = netdev2pinfo(dev); |
1516 | struct adapter *adap = netdev2adap(dev); |
1517 | int ret = 0; |
1518 | |
1519 | if (!pi->nmirrorqsets) |
1520 | return -EOPNOTSUPP; |
1521 | |
1522 | mutex_lock(&pi->vi_mirror_mutex); |
1523 | if (pi->viid_mirror) { |
1524 | pi->vi_mirror_count++; |
1525 | goto out_unlock; |
1526 | } |
1527 | |
1528 | ret = t4_init_port_mirror(pi, mbox: adap->mbox, port: pi->port_id, pf: adap->pf, vf: 0, |
1529 | mirror_viid: &pi->viid_mirror); |
1530 | if (ret) |
1531 | goto out_unlock; |
1532 | |
1533 | pi->vi_mirror_count = 1; |
1534 | |
1535 | if (adap->flags & CXGB4_FULL_INIT_DONE) { |
1536 | ret = cxgb4_port_mirror_alloc_queues(dev); |
1537 | if (ret) |
1538 | goto out_free_vi; |
1539 | |
1540 | ret = cxgb4_port_mirror_start(dev); |
1541 | if (ret) |
1542 | goto out_free_queues; |
1543 | } |
1544 | |
1545 | mutex_unlock(lock: &pi->vi_mirror_mutex); |
1546 | return 0; |
1547 | |
1548 | out_free_queues: |
1549 | cxgb4_port_mirror_free_queues(dev); |
1550 | |
1551 | out_free_vi: |
1552 | pi->vi_mirror_count = 0; |
1553 | t4_free_vi(adap, mbox: adap->mbox, pf: adap->pf, vf: 0, viid: pi->viid_mirror); |
1554 | pi->viid_mirror = 0; |
1555 | |
1556 | out_unlock: |
1557 | mutex_unlock(lock: &pi->vi_mirror_mutex); |
1558 | return ret; |
1559 | } |
1560 | |
1561 | void cxgb4_port_mirror_free(struct net_device *dev) |
1562 | { |
1563 | struct port_info *pi = netdev2pinfo(dev); |
1564 | struct adapter *adap = netdev2adap(dev); |
1565 | |
1566 | mutex_lock(&pi->vi_mirror_mutex); |
1567 | if (!pi->viid_mirror) |
1568 | goto out_unlock; |
1569 | |
1570 | if (pi->vi_mirror_count > 1) { |
1571 | pi->vi_mirror_count--; |
1572 | goto out_unlock; |
1573 | } |
1574 | |
1575 | cxgb4_port_mirror_stop(dev); |
1576 | cxgb4_port_mirror_free_queues(dev); |
1577 | |
1578 | pi->vi_mirror_count = 0; |
1579 | t4_free_vi(adap, mbox: adap->mbox, pf: adap->pf, vf: 0, viid: pi->viid_mirror); |
1580 | pi->viid_mirror = 0; |
1581 | |
1582 | out_unlock: |
1583 | mutex_unlock(lock: &pi->vi_mirror_mutex); |
1584 | } |
1585 | |
1586 | /* |
1587 | * upper-layer driver support |
1588 | */ |
1589 | |
1590 | /* |
1591 | * Allocate an active-open TID and set it to the supplied value. |
1592 | */ |
1593 | int cxgb4_alloc_atid(struct tid_info *t, void *data) |
1594 | { |
1595 | int atid = -1; |
1596 | |
1597 | spin_lock_bh(lock: &t->atid_lock); |
1598 | if (t->afree) { |
1599 | union aopen_entry *p = t->afree; |
1600 | |
1601 | atid = (p - t->atid_tab) + t->atid_base; |
1602 | t->afree = p->next; |
1603 | p->data = data; |
1604 | t->atids_in_use++; |
1605 | } |
1606 | spin_unlock_bh(lock: &t->atid_lock); |
1607 | return atid; |
1608 | } |
1609 | EXPORT_SYMBOL(cxgb4_alloc_atid); |
1610 | |
1611 | /* |
1612 | * Release an active-open TID. |
1613 | */ |
1614 | void cxgb4_free_atid(struct tid_info *t, unsigned int atid) |
1615 | { |
1616 | union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; |
1617 | |
1618 | spin_lock_bh(lock: &t->atid_lock); |
1619 | p->next = t->afree; |
1620 | t->afree = p; |
1621 | t->atids_in_use--; |
1622 | spin_unlock_bh(lock: &t->atid_lock); |
1623 | } |
1624 | EXPORT_SYMBOL(cxgb4_free_atid); |

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
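
/* Allocation sketch for the two paths above (hypothetical sizes): an
 * IPv4 server claims a single bit, while the IPv6 path asks
 * bitmap_find_free_region() for an order-1 region, i.e. two
 * contiguous, 2-aligned bits.  With nstids = 64 and bits 0-2 already
 * set, IPv4 would be given bit 3 but IPv6 would skip to bits 4-5,
 * which is why the in-use counters above advance by 2 for PF_INET6.
 */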

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR. Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	void **p = &t->tid_tab[tid - t->tid_base];

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
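
/* Illustrative note on the tagging trick above: tid_tab entries are
 * pointer-sized and pointer-aligned, so the low 2 bits of their
 * addresses are always zero and can carry the Tx channel (0-3) for
 * free.  process_tid_release_list() undoes it with
 * "chan = (uintptr_t)p & 3" followed by "p = (void *)p - chan".
 */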

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW. If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	struct sk_buff *skb;

	WARN_ON(tid_out_of_range(&adap->tids, tid));

	if (t->tid_tab[tid - adap->tids.tid_base]) {
		t->tid_tab[tid - adap->tids.tid_base] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int hpftid_bmap_size;
	unsigned int eotid_bmap_size;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
	eotid_bmap_size = BITS_TO_LONGS(t->neotids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nhpftids * sizeof(*t->hpftid_tab) +
	       hpftid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long) +
	       t->neotids * sizeof(*t->eotid_tab) +
	       eotid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
	t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
	t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);
	atomic_set(&t->eotids_in_use, 0);

	/* Set up the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);

		if (t->neotids)
			bitmap_zero(t->eotid_bmap, t->neotids);
	}

	if (t->nhpftids)
		bitmap_zero(t->hpftid_bmap, t->nhpftids);
	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
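
/* Layout sketch of the single kvzalloc() in tid_init() (shown only to
 * illustrate the carving order; all sizes are per-adapter):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap | hpftid_tab[nhpftids] | hpftid_bmap |
 *	ftid_tab[nftids + nsftids] | ftid_bmap | eotid_tab[neotids] |
 *	eotid_bmap
 *
 * One allocation keeps the tables contiguous and lets a single
 * kvfree() of t->tid_tab release everything.
 */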

/**
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @vlan: the VLAN header information
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/**
 * cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
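
/* Worked example (hypothetical MTU table): with
 * mtus[] = { 1500, 2002, 4096, 9000 }, cxgb4_best_mtu(mtus, 4000, &i)
 * stops before 4096 and returns 2002 with i == 1, while a target below
 * the smallest entry, e.g. 1400, returns mtus[0] == 1500 as documented.
 */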

/**
 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 * @mtus: the HW MTU table
 * @header_size: Header Size
 * @data_size_max: maximum Data Segment Size
 * @data_size_align: desired Data Segment Size Alignment (2^N)
 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
 * MTU Table based solely on a Maximum MTU parameter, we break that
 * parameter up into a Header Size and Maximum Data Segment Size, and
 * provide a desired Data Segment Size Alignment.  If we find an MTU in
 * the Hardware MTU Table which will result in a Data Segment Size with
 * the requested alignment _and_ that MTU isn't "too far" from the
 * closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
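
/* Worked example (hypothetical values): with header_size = 40
 * (IPv4 + TCP), data_size_align = 8 and adjacent table entries 1488
 * and 1500, the resulting data sizes are 1448 (8-aligned) and 1460
 * (not aligned).  Because the aligned entry is within one index of the
 * closest entry, the function returns 1488 rather than 1500.
 */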

/**
 * cxgb4_port_chan - get the HW channel of a port
 * @dev: the net device for the port
 *
 * Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 * cxgb4_port_e2cchan - get the HW c-channel of a port
 * @dev: the net device for the port
 *
 * Return the HW RX c-channel of the given port.
 */
unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->rx_cchan;
}
EXPORT_SYMBOL(cxgb4_port_e2cchan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 * cxgb4_port_viid - get the VI id of a port
 * @dev: the net device for the port
 *
 * Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 * cxgb4_port_idx - get the index of a port
 * @dev: the net device for the port
 *
 * Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
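
/* The 64-bit big-endian word fetched above packs both queue indices:
 * the consumer index sits in bits 40:25 and the producer index in
 * bits 24:9, hence the shift-and-mask extraction with 0xffff.
 */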

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
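
/* Worked example of the wrap-around delta above (hypothetical queue):
 * for a 1024-entry queue with hw_pidx = 1000 and a software pidx of 8,
 * the producer has wrapped, so delta = 1024 - 1000 + 8 = 32 descriptors
 * are credited through the doorbell instead of a negative difference.
 */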

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 have only a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
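
/* Worked example of the offset-to-memory mapping above (hypothetical
 * sizes): with 256 MB of EDC0 and 256 MB of EDC1, a computed offset of
 * 300 MB lies past edc0_end (256 MB) but below edc1_end (512 MB), so
 * the TPTE is read from MEM_EDC1 at address 300 MB - 256 MB = 44 MB.
 */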

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}

void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	if (!is_uld(adap))
		return;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & CXGB4_USING_MSIX) {
		if (s->nd_msix_idx < 0) {
			err = -ENOMEM;
			goto irq_err;
		}

		err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
				  t4_nondata_intr, 0,
				  adap->msix_info[s->nd_msix_idx].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err)
			goto irq_err_free_nd_msix;
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & CXGB4_USING_MSI) ? 0
				  : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= CXGB4_FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	return err;

irq_err_free_nd_msix:
	free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
	t4_free_sge_resources(adap);
rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}

static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);

	adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int err;

	netif_carrier_off(dev);

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (err)
		return err;

	if (pi->nmirrorqsets) {
		mutex_lock(&pi->vi_mirror_mutex);
		err = cxgb4_port_mirror_alloc_queues(dev);
		if (err)
			goto out_unlock;

		err = cxgb4_port_mirror_start(dev);
		if (err)
			goto out_free_queues;
		mutex_unlock(&pi->vi_mirror_mutex);
	}

	netif_tx_start_all_queues(dev);
	return 0;

out_free_queues:
	cxgb4_port_mirror_free_queues(dev);

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
				  false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	if (ret)
		return ret;

	if (pi->nmirrorqsets) {
		mutex_lock(&pi->vi_mirror_mutex);
		cxgb4_port_mirror_stop(dev);
		cxgb4_port_mirror_free_queues(dev);
		mutex_unlock(&pi->vi_mirror_mutex);
	}

	return 0;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = be16_to_cpu(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = stats.rx_fcs_err;
	ns->rx_frame_errors = stats.rx_symbol_err;
	ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
			 stats.rx_ovflow2 + stats.rx_ovflow3 +
			 stats.rx_trunc0 + stats.rx_trunc1 +
			 stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
			     HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
			    HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	int ret;

	ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
			    pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}

/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned Ethernet MAC address */
	a &= ~0x0100;	/* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
	     vf < nvfs; vf++) {
		macaddr[5] = adap->pf * nvfs + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
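
/* Worked example (hypothetical VPD serial "na" = 000743123456): the
 * folded prefix is a = 0x0007 ^ 0x0743 = 0x0744; forcing the
 * locally-administered bit and clearing the multicast bit yields
 * 0x0644, so with pf = 0 and 16 VFs the first VF is assigned
 * 06:44:12:34:56:00 and later VFs simply increment the last octet.
 */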

static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}

static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	ivi->linkstate = vfinfo->link_state;
	return 0;
}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}

static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int link_ok, speed, mtu;
	u32 fw_pfvf, fw_class;
	int class_id = vf;
	int ret;
	u16 pktsize;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	if (min_tx_rate) {
		dev_err(adap->pdev_dev,
			"Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
			min_tx_rate, vf);
		return -EINVAL;
	}

	if (max_tx_rate == 0) {
		/* unbind VF from any Traffic Class */
		fw_pfvf =
		    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
		fw_class = 0xffffffff;
		ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
				    &fw_pfvf, &fw_class);
		if (ret) {
			dev_err(adap->pdev_dev,
				"Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
				ret, adap->pf, vf);
			return -EINVAL;
		}
		dev_info(adap->pdev_dev,
			 "PF %d VF %d is unbound from TX Rate Limiting\n",
			 adap->pf, vf);
		adap->vfinfo[vf].tx_rate = 0;
		return 0;
	}
3328 | |
3329 | ret = t4_get_link_params(pi, link_okp: &link_ok, speedp: &speed, mtup: &mtu); |
3330 | if (ret != FW_SUCCESS) { |
3331 | dev_err(adap->pdev_dev, |
3332 | "Failed to get link information for VF %d\n" , vf); |
3333 | return -EINVAL; |
3334 | } |
3335 | |
3336 | if (!link_ok) { |
3337 | dev_err(adap->pdev_dev, "Link down for VF %d\n" , vf); |
3338 | return -EINVAL; |
3339 | } |
3340 | |
3341 | if (max_tx_rate > speed) { |
3342 | dev_err(adap->pdev_dev, |
3343 | "Max tx rate %d for VF %d can't be > link-speed %u" , |
3344 | max_tx_rate, vf, speed); |
3345 | return -EINVAL; |
3346 | } |
3347 | |
3348 | pktsize = mtu; |
3349 | /* subtract ethhdr size and 4 bytes crc since, f/w appends it */ |
3350 | pktsize = pktsize - sizeof(struct ethhdr) - 4; |
3351 | /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */ |
3352 | pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); |
3353 | /* configure Traffic Class for rate-limiting */ |
3354 | ret = t4_sched_params(adapter: adap, type: SCHED_CLASS_TYPE_PACKET, |
3355 | level: SCHED_CLASS_LEVEL_CL_RL, |
3356 | mode: SCHED_CLASS_MODE_CLASS, |
3357 | rateunit: SCHED_CLASS_RATEUNIT_BITS, |
3358 | ratemode: SCHED_CLASS_RATEMODE_ABS, |
3359 | channel: pi->tx_chan, class: class_id, minrate: 0, |
3360 | maxrate: max_tx_rate * 1000, weight: 0, pktsize, burstsize: 0); |
3361 | if (ret) { |
3362 | dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n" , |
3363 | ret); |
3364 | return -EINVAL; |
3365 | } |
3366 | dev_info(adap->pdev_dev, |
3367 | "Class %d with MSS %u configured with rate %u\n" , |
3368 | class_id, pktsize, max_tx_rate); |
3369 | |
3370 | /* bind VF to configured Traffic Class */ |
3371 | fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | |
3372 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); |
3373 | fw_class = class_id; |
3374 | ret = t4_set_params(adap, mbox: adap->mbox, pf: adap->pf, vf: vf + 1, nparams: 1, params: &fw_pfvf, |
3375 | val: &fw_class); |
3376 | if (ret) { |
3377 | dev_err(adap->pdev_dev, |
3378 | "Err %d in binding PF %d VF %d to Traffic Class %d\n" , |
3379 | ret, adap->pf, vf, class_id); |
3380 | return -EINVAL; |
3381 | } |
3382 | dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n" , |
3383 | adap->pf, vf, class_id); |
3384 | adap->vfinfo[vf].tx_rate = max_tx_rate; |
3385 | return 0; |
3386 | } |
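
/* Sketch of the pktsize arithmetic above for a standard 1500-byte MTU
 * (values are illustrative):
 *
 *	1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr) = 1442
 *
 * and a typical administrator-side trigger (names are placeholders), with
 * the rate given in Mb/s:
 *
 *	ip link set dev eth0 vf 0 max_tx_rate 1000
 */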

static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}

static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
					int link)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	u32 param, val;
	int ret = 0;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		val = FW_VF_LINK_STATE_AUTO;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		val = FW_VF_LINK_STATE_ENABLE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		val = FW_VF_LINK_STATE_DISABLE;
		break;

	default:
		return -EINVAL;
	}

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
			    &param, &val);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Error %d in setting PF %d VF %d link state\n",
			ret, adap->pf, vf);
		return -EINVAL;
	}

	adap->vfinfo[vf].link_state = link;
	return ret;
}
#endif /* CONFIG_PCI_IOV */
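
/* Illustrative admin-side usage for the VF VLAN and link-state ndo
 * callbacks above (interface and VF numbers are placeholders):
 *
 *	ip link set dev eth0 vf 0 vlan 100	# 802.1Q only, qos must be 0
 *	ip link set dev eth0 vf 0 state disable
 *
 * Both paths are thin wrappers that translate the rtnetlink request into
 * a firmware PFVF parameter write via t4_set_params().
 */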

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
				    addr->sa_data, true, &pi->smt_idx);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & CXGB4_USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct ch_sched_queue qe = { 0 };
	struct ch_sched_params p = { 0 };
	struct sched_class *e;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	qe.queue = index;
	e = cxgb4_sched_queue_lookup(dev, &qe);
	if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
		dev_err(adap->pdev_dev,
			"Queue %u already bound to class %u of type: %u\n",
			index, e->idx, e->info.u.params.level);
		return -EBUSY;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;

	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d failed. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!req_rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel = pi->tx_chan;
	p.u.params.class = SCHED_CLS_NONE;
	p.u.params.minrate = 0;
	p.u.params.maxrate = req_rate;
	p.u.params.weight = 0;
	p.u.params.pktsize = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
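
/* .ndo_set_tx_maxrate is driven from sysfs; a minimal sketch of the
 * userspace trigger (interface and queue names are placeholders), with
 * the rate given in Mb/s:
 *
 *	echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * A rate of 0 only unbinds the queue, taking the early "Queue already
 * unbound" return above.
 */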

static int cxgb_setup_tc_flower(struct net_device *dev,
				struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case FLOW_CLS_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case FLOW_CLS_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
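
/* Example of a tc flower rule that arrives here as FLOW_CLS_REPLACE
 * (purely illustrative; interface name and addresses are placeholders):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.0.2.1 ip_proto tcp dst_port 80 \
 *		skip_sw action drop
 */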

static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_matchall(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls_matchall,
				  bool ingress)
{
	struct adapter *adap = netdev2adap(dev);

	if (!adap->tc_matchall)
		return -ENOMEM;

	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
	case TC_CLSMATCHALL_DESTROY:
		return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
	case TC_CLSMATCHALL_STATS:
		if (ingress)
			return cxgb4_tc_matchall_stats(dev, cls_matchall);
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}
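
/* Matchall offload is typically used for per-port policing; an
 * illustrative ingress policer (interface name and rate are placeholders):
 *
 *	tc filter add dev eth0 ingress matchall \
 *		skip_sw action police rate 1Gbit burst 64k
 *
 * Note that stats are only offloaded for the ingress direction above.
 */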

static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	case TC_SETUP_CLSMATCHALL:
		return cxgb_setup_tc_matchall(dev, type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return cxgb_setup_tc_matchall(dev, type_data, false);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int cxgb_setup_tc_mqprio(struct net_device *dev,
				struct tc_mqprio_qopt_offload *mqprio)
{
	struct adapter *adap = netdev2adap(dev);

	if (!is_ethofld(adap) || !adap->tc_mqprio)
		return -ENOMEM;

	return cxgb4_setup_tc_mqprio(dev, mqprio);
}

static LIST_HEAD(cxgb_block_cb_list);

static int cxgb_setup_tc_block(struct net_device *dev,
			       struct flow_block_offload *f)
{
	struct port_info *pi = netdev_priv(dev);
	flow_setup_cb_t *cb;
	bool ingress_only;

	pi->tc_block_shared = f->block_shared;
	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = cxgb_setup_tc_block_egress_cb;
		ingress_only = false;
	} else {
		cb = cxgb_setup_tc_block_ingress_cb;
		ingress_only = true;
	}

	return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
					  cb, pi, dev, ingress_only);
}

static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return cxgb_setup_tc_mqprio(dev, type_data);
	case TC_SETUP_BLOCK:
		return cxgb_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
				      unsigned int table, unsigned int entry,
				      struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start + pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
				    i);
			return ret;
		}
	}

	return 0;
}

static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
				    unsigned int table, unsigned int entry,
				    struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		adapter->vxlan_port = ti->port;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		adapter->geneve_port = ti->port;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return -EINVAL;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fall back to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start + pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			return ret;
		}
	}

	return 0;
}

static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
	.set_port = cxgb_udp_tunnel_set_port,
	.unset_port = cxgb_udp_tunnel_unset_port,
	.tables = {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};
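
/* The table above registers one VXLAN and one GENEVE slot with the
 * udp_tunnel_nic infrastructure; the core then invokes set_port/unset_port
 * as tunnel sockets come and go. An illustrative trigger (device names and
 * IDs are placeholders):
 *
 *	ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
 *
 * which ends up programming MPS_RX_VXLAN_TYPE_A with the UDP port number.
 */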

static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}
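
/* Consequence of the fixup above: turning off RX checksumming also turns
 * off GRO, e.g. (interface name is a placeholder):
 *
 *	ethtool -K eth0 rx off	# GRO is implicitly disabled as well
 */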

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_start_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_eth_ioctl        = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};

#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open               = cxgb4_mgmt_open,
	.ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
	.ndo_set_vf_link_state  = cxgb4_mgmt_set_vf_link_state,
};

static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
#endif

static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}

void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}

static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}

/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)
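
/* Worked example of the two macros above, assuming a 4 KB PAGE_SIZE (other
 * page sizes scale accordingly):
 *
 *	HMA_PAGE_ORDER     = ilog2(16384 / 4096) = 2    (16 KB chunks)
 *	HMA_MAX_TOTAL_SIZE = ((4096 << 2) * 16384) >> 20 = 256 MB
 */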

static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}

static int adap_config_hma(struct adapter *adapter)
{
	struct scatterlist *sgl, *iter;
	struct sg_table *sgt;
	struct page *newpage;
	unsigned int i, j, k;
	u32 param, hma_size;
	unsigned int ncmds;
	size_t page_size;
	u32 page_order;
	int node, ret;

	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
	if (is_kdump_kernel() ||
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return 0;

	/* Get the HMA region size required by fw */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
	if (ret || !hma_size)
		return 0;

	if (hma_size < HMA_MIN_TOTAL_SIZE ||
	    hma_size > HMA_MAX_TOTAL_SIZE) {
		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds(%u-%lu)MB\n",
			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
		return -EINVAL;
	}

	page_size = HMA_PAGE_SIZE;
	page_order = HMA_PAGE_ORDER;
	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
	if (unlikely(!adapter->hma.sgt)) {
		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
		return -ENOMEM;
	}
	sgt = adapter->hma.sgt;
	/* FW returned value will be in MB */
	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
		kfree(adapter->hma.sgt);
		adapter->hma.sgt = NULL;
		return -ENOMEM;
	}

	sgl = adapter->hma.sgt->sgl;
	node = dev_to_node(adapter->pdev_dev);
	for_each_sg(sgl, iter, sgt->orig_nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
					   __GFP_ZERO, page_order);
		if (!newpage) {
			dev_err(adapter->pdev_dev,
				"Not enough memory for HMA page allocation\n");
			ret = -ENOMEM;
			goto free_hma;
		}
		sg_set_page(iter, newpage, page_size << page_order, 0);
	}

	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
				DMA_BIDIRECTIONAL);
	if (!sgt->nents) {
		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping\n");
		ret = -ENOMEM;
		goto free_hma;
	}
	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr)) {
		ret = -ENOMEM;
		goto free_hma;
	}

	for_each_sg(sgl, iter, sgt->nents, i) {
		newpage = sg_page(iter);
		adapter->hma.phy_addr[i] = sg_dma_address(iter);
	}

	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
		struct fw_hma_cmd hma_cmd;
		u8 naddr = HMA_MAX_ADDR_IN_CMD;
		u8 soc = 0, eoc = 0;
		u8 hma_mode = 1;	/* Presently we support only Page table mode */

		soc = (i == 0) ? 1 : 0;
		eoc = (i == ncmds - 1) ? 1 : 0;

		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
		if (i == ncmds - 1) {
			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
		}
		memset(&hma_cmd, 0, sizeof(hma_cmd));
		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

		hma_cmd.mode_to_pcie_params =
			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

		/* HMA cmd size specified in MB */
		hma_cmd.naddr_size =
			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
			      FW_HMA_CMD_NADDR_V(naddr));

		/* Total Page size specified in units of 4K */
		hma_cmd.addr_size_pkd =
			htonl(FW_HMA_CMD_ADDR_SIZE_V
				((page_size << page_order) >> 12));

		/* Fill the 5 addresses */
		for (j = 0; j < naddr; j++) {
			hma_cmd.phy_address[j] =
				cpu_to_be64(adapter->hma.phy_addr[j + k]);
		}
		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
				 sizeof(hma_cmd), &hma_cmd);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"HMA FW command failed with err %d\n", ret);
			goto free_hma;
		}
	}

	if (!ret)
		dev_info(adapter->pdev_dev,
			 "Reserved %uMB host memory for HMA\n", hma_size);
	return ret;

free_hma:
	adap_free_hma_mem(adapter);
	return ret;
}
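
/* Sketch of the command chunking above: with HMA_MAX_ADDR_IN_CMD == 5 and,
 * say, 12 mapped SG entries (hypothetical count), the loop issues
 *
 *	ncmds = DIV_ROUND_UP(12, 5) = 3
 *
 * FW_HMA_CMD mailboxes carrying 5, 5 and 2 addresses respectively, with
 * SOC set on the first command and EOC on the last.
 */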

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* Now that we've successfully configured and initialized the adapter,
	 * we can ask the Firmware what resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		return ret;
	}

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}

/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
#define be16(__p) (((__p)[0] << 8) | (__p)[1])
#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

#undef be16
#undef le16
#undef le24
}
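
/* Worked example of the parsing above (byte values are hypothetical): if
 * the 24-bit little-endian value at offset 0x8 is 0x000010, the primary
 * image starts at 0x10 << 12 = 0x10000; the 24-bit LE value at 0x1000a
 * then locates the DRAM segment, and the big-endian 16-bit word at
 * (that DRAM offset + 0x27e) is returned as the PHY firmware version.
 */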

static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
	{ 0, NULL, NULL },
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}

/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	char *fw_config_file, fw_config_file_path[256];
	u32 finiver, finicsum, cfcsum, param, val;
	struct fw_caps_config_cmd caps_cmd;
	unsigned long mtype = 0, maddr = 0;
	const struct firmware *cf;
	char *config_name = NULL;
	int config_issued = 0;
	int ret;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}
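
	/* Sketch of the tail-padding above: a (hypothetical) 1030-byte config
	 * file gives size = 1028 and resid = 2; the first 1028 bytes go out
	 * via t4_memory_rw(), then the final word is rebuilt as
	 *
	 *	last.buf[] = { data[1028], data[1029], 0, 0 }
	 *
	 * and written separately, since t4_memory_rw() only moves 4-byte
	 * multiples.
	 */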
4569 | |
4570 | val = 0; |
4571 | |
4572 | /* Ofld + Hash filter is supported. Older fw will fail this request and |
4573 | * it is fine. |
4574 | */ |
4575 | param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
4576 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD)); |
4577 | ret = t4_set_params(adap: adapter, mbox: adapter->mbox, pf: adapter->pf, vf: 0, |
4578 | nparams: 1, params: ¶m, val: &val); |
4579 | |
4580 | /* FW doesn't know about Hash filter + ofld support, |
4581 | * it's not a problem, don't return an error. |
4582 | */ |
4583 | if (ret < 0) { |
4584 | dev_warn(adapter->pdev_dev, |
4585 | "Hash filter with ofld is not supported by FW\n" ); |
4586 | } |
4587 | |
4588 | /* |
4589 | * Issue a Capability Configuration command to the firmware to get it |
4590 | * to parse the Configuration File. We don't use t4_fw_config_file() |
4591 | * because we want the ability to modify various features after we've |
4592 | * processed the configuration file ... |
4593 | */ |
4594 | memset(&caps_cmd, 0, sizeof(caps_cmd)); |
4595 | caps_cmd.op_to_write = |
4596 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
4597 | FW_CMD_REQUEST_F | |
4598 | FW_CMD_READ_F); |
4599 | caps_cmd.cfvalid_to_len16 = |
4600 | htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | |
4601 | FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | |
4602 | FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | |
4603 | FW_LEN16(caps_cmd)); |
4604 | ret = t4_wr_mbox(adap: adapter, mbox: adapter->mbox, cmd: &caps_cmd, size: sizeof(caps_cmd), |
4605 | rpl: &caps_cmd); |
4606 | |
4607 | /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware |
4608 | * Configuration File in FLASH), our last gasp effort is to use the |
4609 | * Firmware Configuration File which is embedded in the firmware. A |
4610 | * very few early versions of the firmware didn't have one embedded |
4611 | * but we can ignore those. |
4612 | */ |
4613 | if (ret == -ENOENT) { |
4614 | memset(&caps_cmd, 0, sizeof(caps_cmd)); |
4615 | caps_cmd.op_to_write = |
4616 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
4617 | FW_CMD_REQUEST_F | |
4618 | FW_CMD_READ_F); |
4619 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); |
4620 | ret = t4_wr_mbox(adap: adapter, mbox: adapter->mbox, cmd: &caps_cmd, |
4621 | size: sizeof(caps_cmd), rpl: &caps_cmd); |
4622 | config_name = "Firmware Default" ; |
4623 | } |
4624 | |
4625 | config_issued = 1; |
4626 | if (ret < 0) |
4627 | goto bye; |
4628 | |
4629 | finiver = ntohl(caps_cmd.finiver); |
4630 | finicsum = ntohl(caps_cmd.finicsum); |
4631 | cfcsum = ntohl(caps_cmd.cfcsum); |
4632 | if (finicsum != cfcsum) |
4633 | dev_warn(adapter->pdev_dev, "Configuration File checksum " \ |
4634 | "mismatch: [fini] csum=%#x, computed csum=%#x\n" , |
4635 | finicsum, cfcsum); |
4636 | |
4637 | /* |
4638 | * And now tell the firmware to use the configuration we just loaded. |
4639 | */ |
4640 | caps_cmd.op_to_write = |
4641 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
4642 | FW_CMD_REQUEST_F | |
4643 | FW_CMD_WRITE_F); |
4644 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); |
4645 | ret = t4_wr_mbox(adap: adapter, mbox: adapter->mbox, cmd: &caps_cmd, size: sizeof(caps_cmd), |
4646 | NULL); |
4647 | if (ret < 0) |
4648 | goto bye; |
4649 | |
4650 | /* |
4651 | * Tweak configuration based on system architecture, module |
4652 | * parameters, etc. |
4653 | */ |
4654 | ret = adap_init0_tweaks(adapter); |
4655 | if (ret < 0) |
4656 | goto bye; |
4657 | |
4658 | /* We will proceed even if HMA init fails. */ |
4659 | ret = adap_config_hma(adapter); |
4660 | if (ret) |
4661 | dev_err(adapter->pdev_dev, |
4662 | "HMA configuration failed with error %d\n" , ret); |
4663 | |
4664 | if (is_t6(chip: adapter->params.chip)) { |
4665 | adap_config_hpfilter(adapter); |
4666 | ret = setup_ppod_edram(adapter); |
4667 | if (!ret) |
4668 | dev_info(adapter->pdev_dev, "Successfully enabled " |
4669 | "ppod edram feature\n" ); |
4670 | } |
4671 | |
4672 | /* |
4673 | * And finally tell the firmware to initialize itself using the |
4674 | * parameters from the Configuration File. |
4675 | */ |
4676 | ret = t4_fw_initialize(adap: adapter, mbox: adapter->mbox); |
4677 | if (ret < 0) |
4678 | goto bye; |
4679 | |
4680 | /* Emit Firmware Configuration File information and return |
4681 | * successfully. |
4682 | */ |
4683 | dev_info(adapter->pdev_dev, "Successfully configured using Firmware " \ |
4684 | "Configuration File \"%s\", version %#x, computed checksum %#x\n" , |
4685 | config_name, finiver, cfcsum); |
4686 | return 0; |
4687 | |
4688 | /* |
4689 | * Something bad happened. Return the error ... (If the "error" |
4690 | * is that there's no Configuration File on the adapter we don't |
4691 | * want to issue a warning since this is fairly common.) |
4692 | */ |
4693 | bye: |
4694 | if (config_issued && ret != -ENOENT) |
4695 | dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n" , |
4696 | config_name, -ret); |
4697 | return ret; |
4698 | } |
4699 | |
4700 | static struct fw_info fw_info_array[] = { |
4701 | { |
4702 | .chip = CHELSIO_T4, |
4703 | .fs_name = FW4_CFNAME, |
4704 | .fw_mod_name = FW4_FNAME, |
4705 | .fw_hdr = { |
4706 | .chip = FW_HDR_CHIP_T4, |
4707 | .fw_ver = __cpu_to_be32(FW_VERSION(T4)), |
4708 | .intfver_nic = FW_INTFVER(T4, NIC), |
4709 | .intfver_vnic = FW_INTFVER(T4, VNIC), |
4710 | .intfver_ri = FW_INTFVER(T4, RI), |
4711 | .intfver_iscsi = FW_INTFVER(T4, ISCSI), |
4712 | .intfver_fcoe = FW_INTFVER(T4, FCOE), |
4713 | }, |
4714 | }, { |
4715 | .chip = CHELSIO_T5, |
4716 | .fs_name = FW5_CFNAME, |
4717 | .fw_mod_name = FW5_FNAME, |
4718 | .fw_hdr = { |
4719 | .chip = FW_HDR_CHIP_T5, |
4720 | .fw_ver = __cpu_to_be32(FW_VERSION(T5)), |
4721 | .intfver_nic = FW_INTFVER(T5, NIC), |
4722 | .intfver_vnic = FW_INTFVER(T5, VNIC), |
4723 | .intfver_ri = FW_INTFVER(T5, RI), |
4724 | .intfver_iscsi = FW_INTFVER(T5, ISCSI), |
4725 | .intfver_fcoe = FW_INTFVER(T5, FCOE), |
4726 | }, |
4727 | }, { |
4728 | .chip = CHELSIO_T6, |
4729 | .fs_name = FW6_CFNAME, |
4730 | .fw_mod_name = FW6_FNAME, |
4731 | .fw_hdr = { |
4732 | .chip = FW_HDR_CHIP_T6, |
4733 | .fw_ver = __cpu_to_be32(FW_VERSION(T6)), |
4734 | .intfver_nic = FW_INTFVER(T6, NIC), |
4735 | .intfver_vnic = FW_INTFVER(T6, VNIC), |
4736 | .intfver_ofld = FW_INTFVER(T6, OFLD), |
4737 | .intfver_ri = FW_INTFVER(T6, RI), |
4738 | .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), |
4739 | .intfver_iscsi = FW_INTFVER(T6, ISCSI), |
4740 | .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), |
4741 | .intfver_fcoe = FW_INTFVER(T6, FCOE), |
4742 | }, |
4743 | } |
4744 | |
4745 | }; |
4746 | |
4747 | static struct fw_info *find_fw_info(int chip) |
4748 | { |
4749 | int i; |
4750 | |
4751 | for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { |
4752 | if (fw_info_array[i].chip == chip) |
4753 | return &fw_info_array[i]; |
4754 | } |
4755 | return NULL; |
4756 | } |
4757 | |
4758 | /* |
4759 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. |
4760 | */ |
4761 | static int adap_init0(struct adapter *adap, int vpd_skip) |
4762 | { |
4763 | struct fw_caps_config_cmd caps_cmd; |
4764 | u32 params[7], val[7]; |
4765 | enum dev_state state; |
4766 | u32 v, port_vec; |
4767 | int reset = 1; |
4768 | int ret; |
4769 | |
4770 | /* Grab Firmware Device Log parameters as early as possible so we have |
4771 | * access to it for debugging, etc. |
4772 | */ |
4773 | ret = t4_init_devlog_params(adapter: adap); |
4774 | if (ret < 0) |
4775 | return ret; |
4776 | |
4777 | /* Contact FW, advertising Master capability */ |
4778 | ret = t4_fw_hello(adap, mbox: adap->mbox, evt_mbox: adap->mbox, |
4779 | master: is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, state: &state); |
4780 | if (ret < 0) { |
4781 | dev_err(adap->pdev_dev, "could not connect to FW, error %d\n" , |
4782 | ret); |
4783 | return ret; |
4784 | } |
4785 | if (ret == adap->mbox) |
4786 | adap->flags |= CXGB4_MASTER_PF; |
4787 | |
4788 | /* |
4789 | * If we're the Master PF Driver and the device is uninitialized, |
4790 | * then let's consider upgrading the firmware ... (We always want |
4791 | * to check the firmware version number in order to A. get it for |
4792 | * later reporting and B. to warn if the currently loaded firmware |
4793 | * is excessively mismatched relative to the driver.) |
4794 | */ |
4795 | |
4796 | t4_get_version_info(adapter: adap); |
4797 | ret = t4_check_fw_version(adap); |
4798 | /* If firmware is too old (not supported by driver) force an update. */ |
4799 | if (ret) |
4800 | state = DEV_STATE_UNINIT; |
4801 | if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { |
4802 | struct fw_info *fw_info; |
4803 | struct fw_hdr *card_fw; |
4804 | const struct firmware *fw; |
4805 | const u8 *fw_data = NULL; |
4806 | unsigned int fw_size = 0; |
4807 | |
4808 | /* This is the firmware whose headers the driver was compiled |
4809 | * against |
4810 | */ |
4811 | fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); |
4812 | if (fw_info == NULL) { |
4813 | dev_err(adap->pdev_dev, |
4814 | "unable to get firmware info for chip %d.\n" , |
4815 | CHELSIO_CHIP_VERSION(adap->params.chip)); |
4816 | return -EINVAL; |
4817 | } |
4818 | |
4819 | /* allocate memory to read the header of the firmware on the |
4820 | * card |
4821 | */ |
4822 | card_fw = kvzalloc(size: sizeof(*card_fw), GFP_KERNEL); |
4823 | if (!card_fw) { |
4824 | ret = -ENOMEM; |
4825 | goto bye; |
4826 | } |
4827 | |
4828 | /* Get FW from from /lib/firmware/ */ |
4829 | ret = request_firmware(fw: &fw, name: fw_info->fw_mod_name, |
4830 | device: adap->pdev_dev); |
4831 | if (ret < 0) { |
4832 | dev_err(adap->pdev_dev, |
4833 | "unable to load firmware image %s, error %d\n" , |
4834 | fw_info->fw_mod_name, ret); |
4835 | } else { |
4836 | fw_data = fw->data; |
4837 | fw_size = fw->size; |
4838 | } |
4839 | |
4840 | /* upgrade FW logic */ |
4841 | ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, |
4842 | state, reset: &reset); |
4843 | |
4844 | /* Cleaning up */ |
4845 | release_firmware(fw); |
4846 | kvfree(addr: card_fw); |
4847 | |
4848 | if (ret < 0) |
4849 | goto bye; |
4850 | } |
4851 | |
4852 | /* If the firmware is initialized already, emit a simply note to that |
4853 | * effect. Otherwise, it's time to try initializing the adapter. |
4854 | */ |
4855 | if (state == DEV_STATE_INIT) { |
4856 | ret = adap_config_hma(adapter: adap); |
4857 | if (ret) |
4858 | dev_err(adap->pdev_dev, |
4859 | "HMA configuration failed with error %d\n" , |
4860 | ret); |
4861 | dev_info(adap->pdev_dev, "Coming up as %s: " \ |
4862 | "Adapter already initialized\n" , |
4863 | adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE" ); |
4864 | } else { |
4865 | dev_info(adap->pdev_dev, "Coming up as MASTER: " \ |
4866 | "Initializing adapter\n" ); |
4867 | |
4868 | /* Find out whether we're dealing with a version of the |
4869 | * firmware which has configuration file support. |
4870 | */ |
4871 | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
4872 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); |
4873 | ret = t4_query_params(adap, mbox: adap->mbox, pf: adap->pf, vf: 0, nparams: 1, |
4874 | params, val); |
4875 | |
4876 | /* If the firmware doesn't support Configuration Files, |
4877 | * return an error. |
4878 | */ |
4879 | if (ret < 0) { |
4880 | dev_err(adap->pdev_dev, "firmware doesn't support " |
4881 | "Firmware Configuration Files\n" ); |
4882 | goto bye; |
4883 | } |
4884 | |
4885 | /* The firmware provides us with a memory buffer where we can |
4886 | * load a Configuration File from the host if we want to |
4887 | * override the Configuration File in flash. |
4888 | */ |
4889 | ret = adap_init0_config(adapter: adap, reset); |
4890 | if (ret == -ENOENT) { |
4891 | dev_err(adap->pdev_dev, "no Configuration File " |
4892 | "present on adapter.\n" ); |
4893 | goto bye; |
4894 | } |
4895 | if (ret < 0) { |
4896 | dev_err(adap->pdev_dev, "could not initialize " |
4897 | "adapter, error %d\n" , -ret); |
4898 | goto bye; |
4899 | } |
4900 | } |
4901 | |
4902 | /* Now that we've successfully configured and initialized the adapter |
4903 | * (or found it already initialized), we can ask the Firmware what |
4904 | * resources it has provisioned for us. |
4905 | */ |
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
4910 | goto bye; |
4911 | } |
4912 | |
4913 | /* Grab VPD parameters. This should be done after we establish a |
4914 | * connection to the firmware since some of the VPD parameters |
4915 | * (notably the Core Clock frequency) are retrieved via requests to |
4916 | * the firmware. On the other hand, we need these fairly early on |
4917 | * so we do this right after getting ahold of the firmware. |
4918 | * |
4919 | * We need to do this after initializing the adapter because someone |
4920 | * could have FLASHed a new VPD which won't be read by the firmware |
4921 | * until we do the RESET ... |
4922 | */ |
4923 | if (!vpd_skip) { |
		ret = t4_get_vpd_params(adap, &adap->params.vpd);
4925 | if (ret < 0) |
4926 | goto bye; |
4927 | } |
4928 | |
4929 | /* Find out what ports are available to us. Note that we need to do |
4930 | * this before calling adap_init0_no_config() since it needs nports |
4931 | * and portvec ... |
4932 | */ |
4933 | v = |
4934 | FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
4935 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4937 | if (ret < 0) |
4938 | goto bye; |
4939 | |
4940 | adap->params.nports = hweight32(port_vec); |
4941 | adap->params.portvec = port_vec; |
4942 | |
4943 | /* Give the SGE code a chance to pull in anything that it needs ... |
4944 | * Note that this must be called after we retrieve our VPD parameters |
4945 | * in order to know how to convert core ticks to seconds, etc. |
4946 | */ |
4947 | ret = t4_sge_init(adap); |
4948 | if (ret < 0) |
4949 | goto bye; |
4950 | |
4951 | /* Grab the SGE Doorbell Queue Timer values. If successful, that |
4952 | * indicates that the Firmware and Hardware support this. |
4953 | */ |
4954 | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
4955 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK)); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
4958 | |
4959 | if (!ret) { |
4960 | adap->sge.dbqtimer_tick = val[0]; |
		ret = t4_read_sge_dbqtimers(adap,
					    ARRAY_SIZE(adap->sge.dbqtimer_val),
					    adap->sge.dbqtimer_val);
4964 | } |
4965 | |
4966 | if (!ret) |
4967 | adap->flags |= CXGB4_SGE_DBQ_TIMER; |
4968 | |
	if (is_bypass_device(adap->pdev->device))
4970 | adap->params.bypass = 1; |
4971 | |
4972 | /* |
4973 | * Grab some of our basic fundamental operating parameters. |
4974 | */ |
4975 | params[0] = FW_PARAM_PFVF(EQ_START); |
4976 | params[1] = FW_PARAM_PFVF(L2T_START); |
4977 | params[2] = FW_PARAM_PFVF(L2T_END); |
4978 | params[3] = FW_PARAM_PFVF(FILTER_START); |
4979 | params[4] = FW_PARAM_PFVF(FILTER_END); |
4980 | params[5] = FW_PARAM_PFVF(IQFLINT_START); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4982 | if (ret < 0) |
4983 | goto bye; |
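	/* The firmware reports inclusive start/end indices, hence the
	 * "+ 1" when the counts are computed below.
	 */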
4984 | adap->sge.egr_start = val[0]; |
4985 | adap->l2t_start = val[1]; |
4986 | adap->l2t_end = val[2]; |
4987 | adap->tids.ftid_base = val[3]; |
4988 | adap->tids.nftids = val[4] - val[3] + 1; |
4989 | adap->sge.ingr_start = val[5]; |
4990 | |
4991 | if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { |
4992 | params[0] = FW_PARAM_PFVF(HPFILTER_START); |
4993 | params[1] = FW_PARAM_PFVF(HPFILTER_END); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
4996 | if (ret < 0) |
4997 | goto bye; |
4998 | |
4999 | adap->tids.hpftid_base = val[0]; |
5000 | adap->tids.nhpftids = val[1] - val[0] + 1; |
5001 | |
5002 | /* Read the raw mps entries. In T6, the last 2 tcam entries |
5003 | * are reserved for raw mac addresses (rawf = 2, one per port). |
5004 | */ |
5005 | params[0] = FW_PARAM_PFVF(RAWF_START); |
5006 | params[1] = FW_PARAM_PFVF(RAWF_END); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
5009 | if (ret == 0) { |
5010 | adap->rawf_start = val[0]; |
5011 | adap->rawf_cnt = val[1] - val[0] + 1; |
5012 | } |
5013 | |
5014 | adap->tids.tid_base = |
5015 | t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A); |
5016 | } |
5017 | |
5018 | /* qids (ingress/egress) returned from firmware can be anywhere |
5019 | * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. |
5020 | * Hence driver needs to allocate memory for this range to |
5021 | * store the queue info. Get the highest IQFLINT/EQ index returned |
5022 | * in FW_EQ_*_CMD.alloc command. |
5023 | */ |
5024 | params[0] = FW_PARAM_PFVF(EQ_END); |
5025 | params[1] = FW_PARAM_PFVF(IQFLINT_END); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5027 | if (ret < 0) |
5028 | goto bye; |
5029 | adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; |
5030 | adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; |
5031 | |
	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
5034 | if (!adap->sge.egr_map) { |
5035 | ret = -ENOMEM; |
5036 | goto bye; |
5037 | } |
5038 | |
	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5041 | if (!adap->sge.ingr_map) { |
5042 | ret = -ENOMEM; |
5043 | goto bye; |
5044 | } |
5045 | |
	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
5048 | */ |
	adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5050 | if (!adap->sge.starving_fl) { |
5051 | ret = -ENOMEM; |
5052 | goto bye; |
5053 | } |
5054 | |
	adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5056 | if (!adap->sge.txq_maperr) { |
5057 | ret = -ENOMEM; |
5058 | goto bye; |
5059 | } |
5060 | |
5061 | #ifdef CONFIG_DEBUG_FS |
	adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5063 | if (!adap->sge.blocked_fl) { |
5064 | ret = -ENOMEM; |
5065 | goto bye; |
5066 | } |
5067 | #endif |
5068 | |
5069 | params[0] = FW_PARAM_PFVF(CLIP_START); |
5070 | params[1] = FW_PARAM_PFVF(CLIP_END); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5072 | if (ret < 0) |
5073 | goto bye; |
5074 | adap->clipt_start = val[0]; |
5075 | adap->clipt_end = val[1]; |
5076 | |
5077 | /* Get the supported number of traffic classes */ |
5078 | params[0] = FW_PARAM_DEV(NUM_TM_CLASS); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5080 | if (ret < 0) { |
5081 | /* We couldn't retrieve the number of Traffic Classes |
5082 | * supported by the hardware/firmware. So we hard |
5083 | * code it here. |
5084 | */ |
		adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5086 | } else { |
5087 | adap->params.nsched_cls = val[0]; |
5088 | } |
5089 | |
5090 | /* query params related to active filter region */ |
5091 | params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); |
5092 | params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5094 | /* If Active filter size is set we enable establishing |
5095 | * offload connection through firmware work request |
5096 | */ |
5097 | if ((val[0] != val[1]) && (ret >= 0)) { |
5098 | adap->flags |= CXGB4_FW_OFLD_CONN; |
5099 | adap->tids.aftid_base = val[0]; |
5100 | adap->tids.aftid_end = val[1]; |
5101 | } |
5102 | |
5103 | /* If we're running on newer firmware, let it know that we're |
5104 | * prepared to deal with encapsulated CPL messages. Older |
5105 | * firmware won't understand this and we'll just get |
5106 | * unencapsulated messages ... |
5107 | */ |
5108 | params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); |
5109 | val[0] = 1; |
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5111 | |
5112 | /* |
5113 | * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL |
5114 | * capability. Earlier versions of the firmware didn't have the |
5115 | * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no |
5116 | * permission to use ULPTX MEMWRITE DSGL. |
5117 | */ |
	if (is_t4(adap->params.chip)) {
5119 | adap->params.ulptx_memwrite_dsgl = false; |
5120 | } else { |
5121 | params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
5124 | adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); |
5125 | } |
5126 | |
5127 | /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */ |
5128 | params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
5131 | adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); |
5132 | |
5133 | /* See if FW supports FW_FILTER2 work request */ |
	if (is_t4(adap->params.chip)) {
5135 | adap->params.filter2_wr_support = false; |
5136 | } else { |
5137 | params[0] = FW_PARAM_DEV(FILTER2_WR); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
5140 | adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); |
5141 | } |
5142 | |
5143 | /* Check if FW supports returning vin and smt index. |
5144 | * If this is not supported, driver will interpret |
5145 | * these values from viid. |
5146 | */ |
5147 | params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); |
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
5150 | adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); |
5151 | |
5152 | /* |
5153 | * Get device capabilities so we can determine what resources we need |
5154 | * to manage. |
5155 | */ |
5156 | memset(&caps_cmd, 0, sizeof(caps_cmd)); |
5157 | caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
5158 | FW_CMD_REQUEST_F | FW_CMD_READ_F); |
5159 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); |
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
5162 | if (ret < 0) |
5163 | goto bye; |
5164 | |
	/* Hash filter support requires some mandatory register settings to be
	 * validated, which in turn depends on whether offload is enabled, so
	 * check and set the offload flag here.
	 */
5169 | if (caps_cmd.ofldcaps) |
5170 | adap->params.offload = 1; |
5171 | |
5172 | if (caps_cmd.ofldcaps || |
5173 | (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) || |
5174 | (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) { |
5175 | /* query offload-related parameters */ |
5176 | params[0] = FW_PARAM_DEV(NTID); |
5177 | params[1] = FW_PARAM_PFVF(SERVER_START); |
5178 | params[2] = FW_PARAM_PFVF(SERVER_END); |
5179 | params[3] = FW_PARAM_PFVF(TDDP_START); |
5180 | params[4] = FW_PARAM_PFVF(TDDP_END); |
5181 | params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
5184 | if (ret < 0) |
5185 | goto bye; |
5186 | adap->tids.ntids = val[0]; |
5187 | adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); |
5188 | adap->tids.stid_base = val[1]; |
5189 | adap->tids.nstids = val[2] - val[1] + 1; |
5190 | /* |
5191 | * Setup server filter region. Divide the available filter |
5192 | * region into two parts. Regular filters get 1/3rd and server |
		 * filters get 2/3rd part. This is only enabled if the
		 * workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are
		 *    used to redirect SYN packets to the offload queue.
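		 * For example, with nftids == 96 the regular filter region
		 * keeps DIV_ROUND_UP(96, 3) == 32 filters and the remaining
		 * 64 become server filters.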
5198 | */ |
5199 | if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { |
5200 | adap->tids.sftid_base = adap->tids.ftid_base + |
5201 | DIV_ROUND_UP(adap->tids.nftids, 3); |
5202 | adap->tids.nsftids = adap->tids.nftids - |
5203 | DIV_ROUND_UP(adap->tids.nftids, 3); |
5204 | adap->tids.nftids = adap->tids.sftid_base - |
5205 | adap->tids.ftid_base; |
5206 | } |
5207 | adap->vres.ddp.start = val[3]; |
5208 | adap->vres.ddp.size = val[4] - val[3] + 1; |
5209 | adap->params.ofldq_wr_cred = val[5]; |
5210 | |
5211 | if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) { |
5212 | init_hash_filter(adap); |
5213 | } else { |
5214 | adap->num_ofld_uld += 1; |
5215 | } |
5216 | |
5217 | if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) { |
5218 | params[0] = FW_PARAM_PFVF(ETHOFLD_START); |
5219 | params[1] = FW_PARAM_PFVF(ETHOFLD_END); |
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
					      params, val);
5222 | if (!ret) { |
5223 | adap->tids.eotid_base = val[0]; |
5224 | adap->tids.neotids = min_t(u32, MAX_ATIDS, |
5225 | val[1] - val[0] + 1); |
5226 | adap->params.ethofld = 1; |
5227 | } |
5228 | } |
5229 | } |
5230 | if (caps_cmd.rdmacaps) { |
5231 | params[0] = FW_PARAM_PFVF(STAG_START); |
5232 | params[1] = FW_PARAM_PFVF(STAG_END); |
5233 | params[2] = FW_PARAM_PFVF(RQ_START); |
5234 | params[3] = FW_PARAM_PFVF(RQ_END); |
5235 | params[4] = FW_PARAM_PFVF(PBL_START); |
5236 | params[5] = FW_PARAM_PFVF(PBL_END); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
5239 | if (ret < 0) |
5240 | goto bye; |
5241 | adap->vres.stag.start = val[0]; |
5242 | adap->vres.stag.size = val[1] - val[0] + 1; |
5243 | adap->vres.rq.start = val[2]; |
5244 | adap->vres.rq.size = val[3] - val[2] + 1; |
5245 | adap->vres.pbl.start = val[4]; |
5246 | adap->vres.pbl.size = val[5] - val[4] + 1; |
5247 | |
5248 | params[0] = FW_PARAM_PFVF(SRQ_START); |
5249 | params[1] = FW_PARAM_PFVF(SRQ_END); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
5252 | if (!ret) { |
5253 | adap->vres.srq.start = val[0]; |
5254 | adap->vres.srq.size = val[1] - val[0] + 1; |
5255 | } |
5256 | if (adap->vres.srq.size) { |
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5260 | } |
5261 | |
5262 | params[0] = FW_PARAM_PFVF(SQRQ_START); |
5263 | params[1] = FW_PARAM_PFVF(SQRQ_END); |
5264 | params[2] = FW_PARAM_PFVF(CQ_START); |
5265 | params[3] = FW_PARAM_PFVF(CQ_END); |
5266 | params[4] = FW_PARAM_PFVF(OCQ_START); |
5267 | params[5] = FW_PARAM_PFVF(OCQ_END); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
5270 | if (ret < 0) |
5271 | goto bye; |
5272 | adap->vres.qp.start = val[0]; |
5273 | adap->vres.qp.size = val[1] - val[0] + 1; |
5274 | adap->vres.cq.start = val[2]; |
5275 | adap->vres.cq.size = val[3] - val[2] + 1; |
5276 | adap->vres.ocq.start = val[4]; |
5277 | adap->vres.ocq.size = val[5] - val[4] + 1; |
5278 | |
5279 | params[0] = FW_PARAM_DEV(MAXORDIRD_QP); |
5280 | params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
5283 | if (ret < 0) { |
5284 | adap->params.max_ordird_qp = 8; |
5285 | adap->params.max_ird_adapter = 32 * adap->tids.ntids; |
5286 | ret = 0; |
5287 | } else { |
5288 | adap->params.max_ordird_qp = val[0]; |
5289 | adap->params.max_ird_adapter = val[1]; |
5290 | } |
5291 | dev_info(adap->pdev_dev, |
5292 | "max_ordird_qp %d max_ird_adapter %d\n" , |
5293 | adap->params.max_ordird_qp, |
5294 | adap->params.max_ird_adapter); |
5295 | |
5296 | /* Enable write_with_immediate if FW supports it */ |
5297 | params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
5300 | adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); |
5301 | |
5302 | /* Enable write_cmpl if FW supports it */ |
5303 | params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
5306 | adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); |
5307 | adap->num_ofld_uld += 2; |
5308 | } |
5309 | if (caps_cmd.iscsicaps) { |
5310 | params[0] = FW_PARAM_PFVF(ISCSI_START); |
5311 | params[1] = FW_PARAM_PFVF(ISCSI_END); |
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
5314 | if (ret < 0) |
5315 | goto bye; |
5316 | adap->vres.iscsi.start = val[0]; |
5317 | adap->vres.iscsi.size = val[1] - val[0] + 1; |
		if (is_t6(adap->params.chip)) {
5319 | params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START); |
5320 | params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END); |
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
					      params, val);
5323 | if (!ret) { |
5324 | adap->vres.ppod_edram.start = val[0]; |
5325 | adap->vres.ppod_edram.size = |
5326 | val[1] - val[0] + 1; |
5327 | |
5328 | dev_info(adap->pdev_dev, |
5329 | "ppod edram start 0x%x end 0x%x size 0x%x\n" , |
5330 | val[0], val[1], |
5331 | adap->vres.ppod_edram.size); |
5332 | } |
5333 | } |
		/* LIO target and cxgb4i initiator */
5335 | adap->num_ofld_uld += 2; |
5336 | } |
5337 | if (caps_cmd.cryptocaps) { |
5338 | if (ntohs(caps_cmd.cryptocaps) & |
5339 | FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) { |
5340 | params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE); |
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
5343 | if (ret < 0) { |
5344 | if (ret != -EINVAL) |
5345 | goto bye; |
5346 | } else { |
5347 | adap->vres.ncrypto_fc = val[0]; |
5348 | } |
5349 | adap->num_ofld_uld += 1; |
5350 | } |
5351 | if (ntohs(caps_cmd.cryptocaps) & |
5352 | FW_CAPS_CONFIG_TLS_INLINE) { |
5353 | params[0] = FW_PARAM_PFVF(TLS_START); |
5354 | params[1] = FW_PARAM_PFVF(TLS_END); |
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
5357 | if (ret < 0) |
5358 | goto bye; |
5359 | adap->vres.key.start = val[0]; |
5360 | adap->vres.key.size = val[1] - val[0] + 1; |
5361 | adap->num_uld += 1; |
5362 | } |
5363 | adap->params.crypto = ntohs(caps_cmd.cryptocaps); |
5364 | } |
5365 | |
5366 | /* The MTU/MSS Table is initialized by now, so load their values. If |
5367 | * we're initializing the adapter, then we'll make any modifications |
5368 | * we want to the MTU/MSS Table and also initialize the congestion |
5369 | * parameters. |
5370 | */ |
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5372 | if (state != DEV_STATE_INIT) { |
5373 | int i; |
5374 | |
5375 | /* The default MTU Table contains values 1492 and 1500. |
5376 | * However, for TCP, it's better to have two values which are |
5377 | * a multiple of 8 +/- 4 bytes apart near this popular MTU. |
5378 | * This allows us to have a TCP Data Payload which is a |
5379 | * multiple of 8 regardless of what combination of TCP Options |
5380 | * are in use (always a multiple of 4 bytes) which is |
5381 | * important for performance reasons. For instance, if no |
5382 | * options are in use, then we have a 20-byte IP header and a |
5383 | * 20-byte TCP header. In this case, a 1500-byte MSS would |
5384 | * result in a TCP Data Payload of 1500 - 40 == 1460 bytes |
5385 | * which is not a multiple of 8. So using an MSS of 1488 in |
5386 | * this case results in a TCP Data Payload of 1448 bytes which |
5387 | * is a multiple of 8. On the other hand, if 12-byte TCP Time |
5388 | * Stamps have been negotiated, then an MTU of 1500 bytes |
5389 | * results in a TCP Data Payload of 1448 bytes which, as |
5390 | * above, is a multiple of 8 bytes ... |
5391 | */ |
5392 | for (i = 0; i < NMTUS; i++) |
5393 | if (adap->params.mtus[i] == 1492) { |
5394 | adap->params.mtus[i] = 1488; |
5395 | break; |
5396 | } |
5397 | |
		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
5400 | } |
	t4_init_sge_params(adap);
5402 | adap->flags |= CXGB4_FW_OK; |
	t4_init_tp_params(adap, true);
5404 | return 0; |
5405 | |
5406 | /* |
5407 | * Something bad happened. If a command timed out or failed with EIO |
5408 | * FW does not operate within its spec or something catastrophic |
5409 | * happened to HW/FW, stop issuing commands. |
5410 | */ |
5411 | bye: |
	adap_free_hma_mem(adap);
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	bitmap_free(adap->sge.starving_fl);
	bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	bitmap_free(adap->sge.blocked_fl);
5419 | #endif |
5420 | if (ret != -ETIMEDOUT && ret != -EIO) |
		t4_fw_bye(adap, adap->mbox);
5422 | return ret; |
5423 | } |
5424 | |
5425 | /* EEH callbacks */ |
5426 | |
5427 | static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, |
5428 | pci_channel_state_t state) |
5429 | { |
5430 | int i; |
5431 | struct adapter *adap = pci_get_drvdata(pdev); |
5432 | |
5433 | if (!adap) |
5434 | goto out; |
5435 | |
5436 | rtnl_lock(); |
5437 | adap->flags &= ~CXGB4_FW_OK; |
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
5440 | for_each_port(adap, i) { |
5441 | struct net_device *dev = adap->port[i]; |
5442 | if (dev) { |
5443 | netif_device_detach(dev); |
5444 | netif_carrier_off(dev); |
5445 | } |
5446 | } |
	spin_unlock(&adap->stats_lock);
5448 | disable_interrupts(adap); |
5449 | if (adap->flags & CXGB4_FULL_INIT_DONE) |
		cxgb_down(adap);
5451 | rtnl_unlock(); |
5452 | if ((adap->flags & CXGB4_DEV_ENABLED)) { |
		pci_disable_device(pdev);
5454 | adap->flags &= ~CXGB4_DEV_ENABLED; |
5455 | } |
5456 | out: return state == pci_channel_io_perm_failure ? |
5457 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; |
5458 | } |
5459 | |
5460 | static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) |
5461 | { |
5462 | int i, ret; |
5463 | struct fw_caps_config_cmd c; |
5464 | struct adapter *adap = pci_get_drvdata(pdev); |
5465 | |
5466 | if (!adap) { |
		pci_restore_state(pdev);
		pci_save_state(pdev);
5469 | return PCI_ERS_RESULT_RECOVERED; |
5470 | } |
5471 | |
5472 | if (!(adap->flags & CXGB4_DEV_ENABLED)) { |
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
				"device after reset\n");
5476 | return PCI_ERS_RESULT_DISCONNECT; |
5477 | } |
5478 | adap->flags |= CXGB4_DEV_ENABLED; |
5479 | } |
5480 | |
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
5484 | |
	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= CXGB4_FW_OK;
	if (adap_init1(adap, &c))
5491 | return PCI_ERS_RESULT_DISCONNECT; |
5492 | |
5493 | for_each_port(adap, i) { |
		struct port_info *pi = adap2pinfo(adap, i);
5495 | u8 vivld = 0, vin = 0; |
5496 | |
		ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
				  NULL, NULL, &vivld, &vin);
5499 | if (ret < 0) |
5500 | return PCI_ERS_RESULT_DISCONNECT; |
5501 | pi->viid = ret; |
5502 | pi->xact_addr_filt = -1; |
5503 | /* If fw supports returning the VIN as part of FW_VI_CMD, |
5504 | * save the returned values. |
5505 | */ |
5506 | if (adap->params.viid_smt_extn_support) { |
5507 | pi->vivld = vivld; |
5508 | pi->vin = vin; |
5509 | } else { |
5510 | /* Retrieve the values from VIID */ |
5511 | pi->vivld = FW_VIID_VIVLD_G(pi->viid); |
5512 | pi->vin = FW_VIID_VIN_G(pi->viid); |
5513 | } |
5514 | } |
5515 | |
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
5518 | setup_memwin(adap); |
5519 | if (cxgb_up(adap)) |
5520 | return PCI_ERS_RESULT_DISCONNECT; |
5521 | return PCI_ERS_RESULT_RECOVERED; |
5522 | } |
5523 | |
5524 | static void eeh_resume(struct pci_dev *pdev) |
5525 | { |
5526 | int i; |
5527 | struct adapter *adap = pci_get_drvdata(pdev); |
5528 | |
5529 | if (!adap) |
5530 | return; |
5531 | |
5532 | rtnl_lock(); |
5533 | for_each_port(adap, i) { |
5534 | struct net_device *dev = adap->port[i]; |
5535 | if (dev) { |
5536 | if (netif_running(dev)) { |
5537 | link_start(dev); |
5538 | cxgb_set_rxmode(dev); |
5539 | } |
5540 | netif_device_attach(dev); |
5541 | } |
5542 | } |
5543 | rtnl_unlock(); |
5544 | } |
5545 | |
5546 | static void eeh_reset_prepare(struct pci_dev *pdev) |
5547 | { |
5548 | struct adapter *adapter = pci_get_drvdata(pdev); |
5549 | int i; |
5550 | |
5551 | if (adapter->pf != 4) |
5552 | return; |
5553 | |
5554 | adapter->flags &= ~CXGB4_FW_OK; |
5555 | |
	notify_ulds(adapter, CXGB4_STATE_DOWN);

	for_each_port(adapter, i)
		if (adapter->port[i]->reg_state == NETREG_REGISTERED)
			cxgb_close(adapter->port[i]);

	disable_interrupts(adapter);
	cxgb4_free_mps_ref_entries(adapter);
5564 | |
5565 | adap_free_hma_mem(adapter); |
5566 | |
5567 | if (adapter->flags & CXGB4_FULL_INIT_DONE) |
5568 | cxgb_down(adapter); |
5569 | } |
5570 | |
5571 | static void eeh_reset_done(struct pci_dev *pdev) |
5572 | { |
5573 | struct adapter *adapter = pci_get_drvdata(pdev); |
5574 | int err, i; |
5575 | |
5576 | if (adapter->pf != 4) |
5577 | return; |
5578 | |
	err = t4_wait_dev_ready(adapter->regs);
	if (err < 0) {
		dev_err(adapter->pdev_dev,
			"Device not ready, err %d", err);
5583 | return; |
5584 | } |
5585 | |
5586 | setup_memwin(adapter); |
5587 | |
	err = adap_init0(adapter, 1);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Adapter init failed, err %d", err);
5592 | return; |
5593 | } |
5594 | |
5595 | setup_memwin_rdma(adapter); |
5596 | |
5597 | if (adapter->flags & CXGB4_FW_OK) { |
		err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
		if (err) {
			dev_err(adapter->pdev_dev,
				"Port init failed, err %d", err);
5602 | return; |
5603 | } |
5604 | } |
5605 | |
	err = cfg_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Config queues failed, err %d", err);
5610 | return; |
5611 | } |
5612 | |
	cxgb4_init_mps_ref_entries(adapter);
5614 | |
5615 | err = setup_fw_sge_queues(adapter); |
5616 | if (err) { |
5617 | dev_err(adapter->pdev_dev, |
5618 | "FW sge queue allocation failed, err %d" , err); |
5619 | return; |
5620 | } |
5621 | |
5622 | for_each_port(adapter, i) |
5623 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
			cxgb_open(adapter->port[i]);
5625 | } |
5626 | |
5627 | static const struct pci_error_handlers cxgb4_eeh = { |
5628 | .error_detected = eeh_err_detected, |
5629 | .slot_reset = eeh_slot_reset, |
5630 | .resume = eeh_resume, |
5631 | .reset_prepare = eeh_reset_prepare, |
5632 | .reset_done = eeh_reset_done, |
5633 | }; |
5634 | |
5635 | /* Return true if the Link Configuration supports "High Speeds" (those greater |
5636 | * than 1Gb/s). |
5637 | */ |
5638 | static inline bool is_x_10g_port(const struct link_config *lc) |
5639 | { |
5640 | unsigned int speeds, high_speeds; |
5641 | |
5642 | speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); |
5643 | high_speeds = speeds & |
5644 | ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); |
5645 | |
5646 | return high_speeds != 0; |
5647 | } |
5648 | |
5649 | /* Perform default configuration of DMA queues depending on the number and type |
5650 | * of ports we found and the number of available CPUs. Most settings can be |
5651 | * modified by the admin prior to actual use. |
5652 | */ |
5653 | static int cfg_queues(struct adapter *adap) |
5654 | { |
5655 | u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; |
5656 | u32 ncpus = num_online_cpus(); |
5657 | u32 niqflint, neq, num_ulds; |
5658 | struct sge *s = &adap->sge; |
5659 | u32 i, n10g = 0, qidx = 0; |
5660 | u32 q10g = 0, q1g; |
5661 | |
5662 | /* Reduce memory usage in kdump environment, disable all offload. */ |
5663 | if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { |
5664 | adap->params.offload = 0; |
5665 | adap->params.crypto = 0; |
5666 | adap->params.ethofld = 0; |
5667 | } |
5668 | |
5669 | /* Calculate the number of Ethernet Queue Sets available based on |
5670 | * resources provisioned for us. We always have an Asynchronous |
5671 | * Firmware Event Ingress Queue. If we're operating in MSI or Legacy |
5672 | * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt |
5673 | * Ingress Queue. Meanwhile, we need two Egress Queues for each |
5674 | * Queue Set: one for the Free List and one for the Ethernet TX Queue. |
5675 | * |
5676 | * Note that we should also take into account all of the various |
5677 | * Offload Queues. But, in any situation where we're operating in |
5678 | * a Resource Constrained Provisioning environment, doing any Offload |
5679 | * at all is problematic ... |
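	 * For example, with pfres.niqflint == 34, no MSI-X, and
	 * pfres.neq == 64, the budget below works out to
	 * min(34 - 2, 64 / 2) == 32 Queue Sets.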
5680 | */ |
5681 | niqflint = adap->params.pfres.niqflint - 1; |
5682 | if (!(adap->flags & CXGB4_USING_MSIX)) |
5683 | niqflint--; |
5684 | neq = adap->params.pfres.neq / 2; |
5685 | avail_qsets = min(niqflint, neq); |
5686 | |
5687 | if (avail_qsets < adap->params.nports) { |
5688 | dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n" , |
5689 | avail_qsets, adap->params.nports); |
5690 | return -ENOMEM; |
5691 | } |
5692 | |
5693 | /* Count the number of 10Gb/s or better ports */ |
5694 | for_each_port(adap, i) |
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5696 | |
5697 | avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); |
5698 | |
5699 | /* We default to 1 queue per non-10G port and up to # of cores queues |
5700 | * per 10G port. |
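	 * For example, with 2 x 10G ports, 2 x 1G ports and 34 available
	 * Ethernet Queue Sets, each 10G port defaults to
	 * (34 - 2) / 2 == 16 Queue Sets.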
5701 | */ |
5702 | if (n10g) |
5703 | q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; |
5704 | |
5705 | #ifdef CONFIG_CHELSIO_T4_DCB |
5706 | /* For Data Center Bridging support we need to be able to support up |
5707 | * to 8 Traffic Priorities; each of which will be assigned to its |
5708 | * own TX Queue in order to prevent Head-Of-Line Blocking. |
5709 | */ |
5710 | q1g = 8; |
5711 | if (adap->params.nports * 8 > avail_eth_qsets) { |
5712 | dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n" , |
5713 | avail_eth_qsets, adap->params.nports * 8); |
5714 | return -ENOMEM; |
5715 | } |
5716 | |
5717 | if (adap->params.nports * ncpus < avail_eth_qsets) |
5718 | q10g = max(8U, ncpus); |
5719 | else |
5720 | q10g = max(8U, q10g); |
5721 | |
5722 | while ((q10g * n10g) > |
5723 | (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) |
5724 | q10g--; |
5725 | |
5726 | #else /* !CONFIG_CHELSIO_T4_DCB */ |
5727 | q1g = 1; |
5728 | q10g = min(q10g, ncpus); |
5729 | #endif /* !CONFIG_CHELSIO_T4_DCB */ |
5730 | if (is_kdump_kernel()) { |
5731 | q10g = 1; |
5732 | q1g = 1; |
5733 | } |
5734 | |
5735 | for_each_port(adap, i) { |
		struct port_info *pi = adap2pinfo(adap, i);
5737 | |
5738 | pi->first_qset = qidx; |
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5740 | qidx += pi->nqsets; |
5741 | } |
5742 | |
5743 | s->ethqsets = qidx; |
5744 | s->max_ethqsets = qidx; /* MSI-X may lower it later */ |
5745 | avail_qsets -= qidx; |
5746 | |
5747 | if (is_uld(adap)) { |
5748 | /* For offload we use 1 queue/channel if all ports are up to 1G, |
5749 | * otherwise we divide all available queues amongst the channels |
5750 | * capped by the number of available cores. |
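		 * For example, with 4 ports and 6 online CPUs (and assuming
		 * MAX_OFLD_QSETS >= 6), min(MAX_OFLD_QSETS, 6) == 6 is
		 * rounded up to 8 queues so each channel gets an equal share.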
5751 | */ |
5752 | num_ulds = adap->num_uld + adap->num_ofld_uld; |
5753 | i = min_t(u32, MAX_OFLD_QSETS, ncpus); |
5754 | avail_uld_qsets = roundup(i, adap->params.nports); |
5755 | if (avail_qsets < num_ulds * adap->params.nports) { |
5756 | adap->params.offload = 0; |
5757 | adap->params.crypto = 0; |
5758 | s->ofldqsets = 0; |
5759 | } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) { |
5760 | s->ofldqsets = adap->params.nports; |
5761 | } else { |
5762 | s->ofldqsets = avail_uld_qsets; |
5763 | } |
5764 | |
5765 | avail_qsets -= num_ulds * s->ofldqsets; |
5766 | } |
5767 | |
5768 | /* ETHOFLD Queues used for QoS offload should follow same |
5769 | * allocation scheme as normal Ethernet Queues. |
5770 | */ |
5771 | if (is_ethofld(adap)) { |
5772 | if (avail_qsets < s->max_ethqsets) { |
5773 | adap->params.ethofld = 0; |
5774 | s->eoqsets = 0; |
5775 | } else { |
5776 | s->eoqsets = s->max_ethqsets; |
5777 | } |
5778 | avail_qsets -= s->eoqsets; |
5779 | } |
5780 | |
5781 | /* Mirror queues must follow same scheme as normal Ethernet |
5782 | * Queues, when there are enough queues available. Otherwise, |
5783 | * allocate at least 1 queue per port. If even 1 queue is not |
5784 | * available, then disable mirror queues support. |
5785 | */ |
5786 | if (avail_qsets >= s->max_ethqsets) |
5787 | s->mirrorqsets = s->max_ethqsets; |
5788 | else if (avail_qsets >= adap->params.nports) |
5789 | s->mirrorqsets = adap->params.nports; |
5790 | else |
5791 | s->mirrorqsets = 0; |
5792 | avail_qsets -= s->mirrorqsets; |
5793 | |
5794 | for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { |
5795 | struct sge_eth_rxq *r = &s->ethrxq[i]; |
5796 | |
		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5798 | r->fl.size = 72; |
5799 | } |
5800 | |
5801 | for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) |
5802 | s->ethtxq[i].q.size = 1024; |
5803 | |
5804 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) |
5805 | s->ctrlq[i].q.size = 512; |
5806 | |
	if (!is_t4(adap->params.chip))
		s->ptptxq.q.size = 8;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5812 | |
5813 | return 0; |
5814 | } |
5815 | |
5816 | /* |
5817 | * Reduce the number of Ethernet queues across all ports to at most n. |
5818 | * n provides at least one queue per port. |
5819 | */ |
5820 | static void reduce_ethqs(struct adapter *adap, int n) |
5821 | { |
5822 | int i; |
5823 | struct port_info *pi; |
5824 | |
5825 | while (n < adap->sge.ethqsets) |
5826 | for_each_port(adap, i) { |
			pi = adap2pinfo(adap, i);
5828 | if (pi->nqsets > 1) { |
5829 | pi->nqsets--; |
5830 | adap->sge.ethqsets--; |
5831 | if (adap->sge.ethqsets <= n) |
5832 | break; |
5833 | } |
5834 | } |
5835 | |
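	/* Renumber each port's first Queue Set so the per-port ranges
	 * remain contiguous.
	 */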
5836 | n = 0; |
5837 | for_each_port(adap, i) { |
		pi = adap2pinfo(adap, i);
5839 | pi->first_qset = n; |
5840 | n += pi->nqsets; |
5841 | } |
5842 | } |
5843 | |
5844 | static int alloc_msix_info(struct adapter *adap, u32 num_vec) |
5845 | { |
5846 | struct msix_info *msix_info; |
5847 | |
	msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5849 | if (!msix_info) |
5850 | return -ENOMEM; |
5851 | |
	adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
	if (!adap->msix_bmap.msix_bmap) {
		kfree(msix_info);
5855 | return -ENOMEM; |
5856 | } |
5857 | |
5858 | spin_lock_init(&adap->msix_bmap.lock); |
5859 | adap->msix_bmap.mapsize = num_vec; |
5860 | |
5861 | adap->msix_info = msix_info; |
5862 | return 0; |
5863 | } |
5864 | |
5865 | static void free_msix_info(struct adapter *adap) |
5866 | { |
	bitmap_free(adap->msix_bmap.msix_bmap);
	kfree(adap->msix_info);
5869 | } |
5870 | |
5871 | int cxgb4_get_msix_idx_from_bmap(struct adapter *adap) |
5872 | { |
5873 | struct msix_bmap *bmap = &adap->msix_bmap; |
5874 | unsigned int msix_idx; |
5875 | unsigned long flags; |
5876 | |
5877 | spin_lock_irqsave(&bmap->lock, flags); |
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5879 | if (msix_idx < bmap->mapsize) { |
5880 | __set_bit(msix_idx, bmap->msix_bmap); |
5881 | } else { |
		spin_unlock_irqrestore(&bmap->lock, flags);
5883 | return -ENOSPC; |
5884 | } |
5885 | |
	spin_unlock_irqrestore(&bmap->lock, flags);
5887 | return msix_idx; |
5888 | } |
5889 | |
5890 | void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, |
5891 | unsigned int msix_idx) |
5892 | { |
5893 | struct msix_bmap *bmap = &adap->msix_bmap; |
5894 | unsigned long flags; |
5895 | |
5896 | spin_lock_irqsave(&bmap->lock, flags); |
5897 | __clear_bit(msix_idx, bmap->msix_bmap); |
	spin_unlock_irqrestore(&bmap->lock, flags);
5899 | } |
5900 | |
5901 | /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ |
#define EXTRA_VECS 2
5903 | |
5904 | static int enable_msix(struct adapter *adap) |
5905 | { |
5906 | u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0; |
5907 | u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0; |
5908 | u8 num_uld = 0, nchan = adap->params.nports; |
5909 | u32 i, want, need, num_vec; |
5910 | struct sge *s = &adap->sge; |
5911 | struct msix_entry *entries; |
5912 | struct port_info *pi; |
5913 | int allocated, ret; |
5914 | |
5915 | want = s->max_ethqsets; |
5916 | #ifdef CONFIG_CHELSIO_T4_DCB |
5917 | /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for |
5918 | * each port. |
5919 | */ |
5920 | need = 8 * nchan; |
5921 | #else |
5922 | need = nchan; |
5923 | #endif |
5924 | eth_need = need; |
5925 | if (is_uld(adap)) { |
5926 | num_uld = adap->num_ofld_uld + adap->num_uld; |
5927 | want += num_uld * s->ofldqsets; |
5928 | uld_need = num_uld * nchan; |
5929 | need += uld_need; |
5930 | } |
5931 | |
5932 | if (is_ethofld(adap)) { |
5933 | want += s->eoqsets; |
5934 | ethofld_need = eth_need; |
5935 | need += ethofld_need; |
5936 | } |
5937 | |
5938 | if (s->mirrorqsets) { |
5939 | want += s->mirrorqsets; |
5940 | mirror_need = nchan; |
5941 | need += mirror_need; |
5942 | } |
5943 | |
5944 | want += EXTRA_VECS; |
5945 | need += EXTRA_VECS; |
5946 | |
	entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5948 | if (!entries) |
5949 | return -ENOMEM; |
5950 | |
5951 | for (i = 0; i < want; i++) |
5952 | entries[i].entry = i; |
5953 | |
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5955 | if (allocated < 0) { |
5956 | /* Disable offload and attempt to get vectors for NIC |
5957 | * only mode. |
5958 | */ |
5959 | want = s->max_ethqsets + EXTRA_VECS; |
5960 | need = eth_need + EXTRA_VECS; |
		allocated = pci_enable_msix_range(adap->pdev, entries,
						  need, want);
5963 | if (allocated < 0) { |
5964 | dev_info(adap->pdev_dev, |
5965 | "Disabling MSI-X due to insufficient MSI-X vectors\n" ); |
5966 | ret = allocated; |
5967 | goto out_free; |
5968 | } |
5969 | |
5970 | dev_info(adap->pdev_dev, |
5971 | "Disabling offload due to insufficient MSI-X vectors\n" ); |
5972 | adap->params.offload = 0; |
5973 | adap->params.crypto = 0; |
5974 | adap->params.ethofld = 0; |
5975 | s->ofldqsets = 0; |
5976 | s->eoqsets = 0; |
5977 | s->mirrorqsets = 0; |
5978 | uld_need = 0; |
5979 | ethofld_need = 0; |
5980 | mirror_need = 0; |
5981 | } |
5982 | |
5983 | num_vec = allocated; |
5984 | if (num_vec < want) { |
5985 | /* Distribute available vectors to the various queue groups. |
5986 | * Every group gets its minimum requirement and NIC gets top |
5987 | * priority for leftovers. |
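		 * Beyond the minimums, each additional NIC Queue Set also
		 * claims a matching ETHOFLD vector when QoS offload is
		 * active.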
5988 | */ |
5989 | ethqsets = eth_need; |
5990 | if (is_uld(adap)) |
5991 | ofldqsets = nchan; |
5992 | if (is_ethofld(adap)) |
5993 | eoqsets = ethofld_need; |
5994 | if (s->mirrorqsets) |
5995 | mirrorqsets = mirror_need; |
5996 | |
5997 | num_vec -= need; |
5998 | while (num_vec) { |
5999 | if (num_vec < eth_need + ethofld_need || |
6000 | ethqsets > s->max_ethqsets) |
6001 | break; |
6002 | |
6003 | for_each_port(adap, i) { |
				pi = adap2pinfo(adap, i);
6005 | if (pi->nqsets < 2) |
6006 | continue; |
6007 | |
6008 | ethqsets++; |
6009 | num_vec--; |
6010 | if (ethofld_need) { |
6011 | eoqsets++; |
6012 | num_vec--; |
6013 | } |
6014 | } |
6015 | } |
6016 | |
6017 | if (is_uld(adap)) { |
6018 | while (num_vec) { |
6019 | if (num_vec < uld_need || |
6020 | ofldqsets > s->ofldqsets) |
6021 | break; |
6022 | |
6023 | ofldqsets++; |
6024 | num_vec -= uld_need; |
6025 | } |
6026 | } |
6027 | |
6028 | if (s->mirrorqsets) { |
6029 | while (num_vec) { |
6030 | if (num_vec < mirror_need || |
6031 | mirrorqsets > s->mirrorqsets) |
6032 | break; |
6033 | |
6034 | mirrorqsets++; |
6035 | num_vec -= mirror_need; |
6036 | } |
6037 | } |
6038 | } else { |
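		/* All requested vectors were granted, so every queue group
		 * keeps its full allocation.
		 */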
6039 | ethqsets = s->max_ethqsets; |
6040 | if (is_uld(adap)) |
6041 | ofldqsets = s->ofldqsets; |
6042 | if (is_ethofld(adap)) |
6043 | eoqsets = s->eoqsets; |
6044 | if (s->mirrorqsets) |
6045 | mirrorqsets = s->mirrorqsets; |
6046 | } |
6047 | |
6048 | if (ethqsets < s->max_ethqsets) { |
6049 | s->max_ethqsets = ethqsets; |
		reduce_ethqs(adap, ethqsets);
6051 | } |
6052 | |
6053 | if (is_uld(adap)) { |
6054 | s->ofldqsets = ofldqsets; |
6055 | s->nqs_per_uld = s->ofldqsets; |
6056 | } |
6057 | |
6058 | if (is_ethofld(adap)) |
6059 | s->eoqsets = eoqsets; |
6060 | |
6061 | if (s->mirrorqsets) { |
6062 | s->mirrorqsets = mirrorqsets; |
6063 | for_each_port(adap, i) { |
			pi = adap2pinfo(adap, i);
6065 | pi->nmirrorqsets = s->mirrorqsets / nchan; |
6066 | mutex_init(&pi->vi_mirror_mutex); |
6067 | } |
6068 | } |
6069 | |
6070 | /* map for msix */ |
	ret = alloc_msix_info(adap, allocated);
6072 | if (ret) |
6073 | goto out_disable_msix; |
6074 | |
6075 | for (i = 0; i < allocated; i++) { |
6076 | adap->msix_info[i].vec = entries[i].vector; |
6077 | adap->msix_info[i].idx = i; |
6078 | } |
6079 | |
6080 | dev_info(adap->pdev_dev, |
6081 | "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n" , |
6082 | allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, |
6083 | s->mirrorqsets); |
6084 | |
	kfree(entries);
6086 | return 0; |
6087 | |
6088 | out_disable_msix: |
	pci_disable_msix(adap->pdev);
6090 | |
6091 | out_free: |
	kfree(entries);
6093 | return ret; |
6094 | } |
6095 | |
6096 | #undef EXTRA_VECS |
6097 | |
static int init_rss(struct adapter *adap)
6099 | { |
6100 | unsigned int i; |
6101 | int err; |
6102 | |
	err = t4_init_rss_mode(adap, adap->mbox);
6104 | if (err) |
6105 | return err; |
6106 | |
6107 | for_each_port(adap, i) { |
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6111 | if (!pi->rss) |
6112 | return -ENOMEM; |
6113 | } |
6114 | return 0; |
6115 | } |
6116 | |
6117 | /* Dump basic information about the adapter */ |
6118 | static void print_adapter_info(struct adapter *adapter) |
6119 | { |
6120 | /* Hardware/Firmware/etc. Version/Revision IDs */ |
6121 | t4_dump_version_info(adapter); |
6122 | |
6123 | /* Software/Hardware configuration */ |
6124 | dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n" , |
6125 | is_offload(adapter) ? "R" : "" , |
6126 | ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : |
6127 | (adapter->flags & CXGB4_USING_MSI) ? "MSI" : "" ), |
6128 | is_offload(adapter) ? "Offload" : "non-Offload" ); |
6129 | } |
6130 | |
6131 | static void print_port_info(const struct net_device *dev) |
6132 | { |
6133 | char buf[80]; |
6134 | char *bufp = buf; |
6135 | const struct port_info *pi = netdev_priv(dev); |
6136 | const struct adapter *adap = pi->adapter; |
6137 | |
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
		bufp += sprintf(bufp, "50G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
		bufp += sprintf(bufp, "100G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
		bufp += sprintf(bufp, "200G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
		bufp += sprintf(bufp, "400G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6159 | |
	netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf);
6161 | } |
6162 | |
6163 | /* |
6164 | * Free the following resources: |
6165 | * - memory used for tables |
6166 | * - MSI/MSI-X |
6167 | * - net devices |
6168 | * - resources FW is holding for us |
6169 | */ |
6170 | static void free_some_resources(struct adapter *adapter) |
6171 | { |
6172 | unsigned int i; |
6173 | |
	kvfree(adapter->smt);
	kvfree(adapter->l2t);
	kvfree(adapter->srq);
	t4_cleanup_sched(adapter);
	kvfree(adapter->tids.tid_tab);
	cxgb4_cleanup_tc_matchall(adapter);
	cxgb4_cleanup_tc_mqprio(adapter);
	cxgb4_cleanup_tc_flower(adapter);
	cxgb4_cleanup_tc_u32(adapter);
	cxgb4_cleanup_ethtool_filters(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	bitmap_free(adapter->sge.starving_fl);
	bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	bitmap_free(adapter->sge.blocked_fl);
6190 | #endif |
6191 | disable_msi(adapter); |
6192 | |
6193 | for_each_port(adapter, i) |
6194 | if (adapter->port[i]) { |
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & CXGB4_FW_OK)
		t4_fw_bye(adapter, adapter->pf);
6205 | } |
6206 | |
6207 | #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \ |
6208 | NETIF_F_GSO_UDP_L4) |
6209 | #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ |
6210 | NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) |
6211 | #define SEGMENT_SIZE 128 |
6212 | |
6213 | static int t4_get_chip_type(struct adapter *adap, int ver) |
6214 | { |
6215 | u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); |
6216 | |
6217 | switch (ver) { |
6218 | case CHELSIO_T4: |
6219 | return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); |
6220 | case CHELSIO_T5: |
6221 | return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); |
6222 | case CHELSIO_T6: |
6223 | return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); |
6224 | default: |
6225 | break; |
6226 | } |
6227 | return -EINVAL; |
6228 | } |
6229 | |
6230 | #ifdef CONFIG_PCI_IOV |
6231 | static void cxgb4_mgmt_setup(struct net_device *dev) |
6232 | { |
6233 | dev->type = ARPHRD_NONE; |
6234 | dev->mtu = 0; |
6235 | dev->hard_header_len = 0; |
6236 | dev->addr_len = 0; |
6237 | dev->tx_queue_len = 0; |
6238 | dev->flags |= IFF_NOARP; |
6239 | dev->priv_flags |= IFF_NO_QUEUE; |
6240 | |
6241 | /* Initialize the device structure. */ |
6242 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; |
6243 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; |
6244 | } |
6245 | |
6246 | static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) |
6247 | { |
6248 | struct adapter *adap = pci_get_drvdata(pdev); |
6249 | int err = 0; |
	int current_vfs = pci_num_vf(pdev);
6251 | u32 pcie_fw; |
6252 | |
	pcie_fw = readl(adap->regs + PCIE_FW_A);
6254 | /* Check if fw is initialized */ |
6255 | if (!(pcie_fw & PCIE_FW_INIT_F)) { |
6256 | dev_warn(&pdev->dev, "Device not initialized\n" ); |
6257 | return -EOPNOTSUPP; |
6258 | } |
6259 | |
	/* If any of the VFs is already assigned to a Guest OS, then
	 * SR-IOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
6266 | return current_vfs; |
6267 | } |
6268 | /* Note that the upper-level code ensures that we're never called with |
6269 | * a non-zero "num_vfs" when we already have VFs instantiated. But |
6270 | * it never hurts to code defensively. |
6271 | */ |
6272 | if (num_vfs != 0 && current_vfs != 0) |
6273 | return -EBUSY; |
6274 | |
6275 | /* Nothing to do for no change. */ |
6276 | if (num_vfs == current_vfs) |
6277 | return num_vfs; |
6278 | |
6279 | /* Disable SRIOV when zero is passed. */ |
6280 | if (!num_vfs) { |
		pci_disable_sriov(pdev);
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

		/* free VF resources */
		adap->num_vfs = 0;
		kfree(adap->vfinfo);
6290 | adap->vfinfo = NULL; |
6291 | return 0; |
6292 | } |
6293 | |
6294 | if (!current_vfs) { |
6295 | struct fw_pfvf_cmd port_cmd, port_rpl; |
6296 | struct net_device *netdev; |
6297 | unsigned int pmask, port; |
6298 | struct pci_dev *pbridge; |
6299 | struct port_info *pi; |
6300 | char name[IFNAMSIZ]; |
6301 | u32 devcap2; |
6302 | u16 flags; |
6303 | |
6304 | /* If we want to instantiate Virtual Functions, then our |
6305 | * parent bridge's PCI-E needs to support Alternative Routing |
6306 | * ID (ARI) because our VFs will show up at function offset 8 |
6307 | * and above. |
6308 | */ |
6309 | pbridge = pdev->bus->self; |
		pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
		pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
6312 | |
6313 | if ((flags & PCI_EXP_FLAGS_VERS) < 2 || |
6314 | !(devcap2 & PCI_EXP_DEVCAP2_ARI)) { |
6315 | /* Our parent bridge does not support ARI so issue a |
6316 | * warning and skip instantiating the VFs. They |
6317 | * won't be reachable. |
6318 | */ |
6319 | dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n" , |
6320 | pbridge->bus->number, PCI_SLOT(pbridge->devfn), |
6321 | PCI_FUNC(pbridge->devfn)); |
6322 | return -ENOTSUPP; |
6323 | } |
6324 | memset(&port_cmd, 0, sizeof(port_cmd)); |
6325 | port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | |
6326 | FW_CMD_REQUEST_F | |
6327 | FW_CMD_READ_F | |
6328 | FW_PFVF_CMD_PFN_V(adap->pf) | |
6329 | FW_PFVF_CMD_VFN_V(0)); |
6330 | port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd)); |
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
6333 | if (err) |
6334 | return err; |
6335 | pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq)); |
6336 | port = ffs(pmask) - 1; |
6337 | /* Allocate VF Management Interface. */ |
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
6340 | netdev = alloc_netdev(sizeof(struct port_info), |
6341 | name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup); |
6342 | if (!netdev) |
6343 | return -ENOMEM; |
6344 | |
		pi = netdev_priv(netdev);
6346 | pi->adapter = adap; |
6347 | pi->lport = port; |
6348 | pi->tx_chan = port; |
6349 | SET_NETDEV_DEV(netdev, &pdev->dev); |
6350 | |
6351 | adap->port[0] = netdev; |
6352 | pi->port_id = 0; |
6353 | |
		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
6358 | adap->port[0] = NULL; |
6359 | return err; |
6360 | } |
6361 | /* Allocate and set up VF Information. */ |
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
6367 | adap->port[0] = NULL; |
6368 | return -ENOMEM; |
6369 | } |
6370 | cxgb4_mgmt_fill_vf_station_mac_addr(adap); |
6371 | } |
6372 | /* Instantiate the requested number of VFs. */ |
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
6381 | adap->vfinfo = NULL; |
6382 | } |
6383 | return err; |
6384 | } |
6385 | |
6386 | adap->num_vfs = num_vfs; |
6387 | return num_vfs; |
6388 | } |
6389 | #endif /* CONFIG_PCI_IOV */ |
6390 | |
6391 | #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
6392 | |
6393 | static int chcr_offload_state(struct adapter *adap, |
6394 | enum cxgb4_netdev_tls_ops op_val) |
6395 | { |
6396 | switch (op_val) { |
6397 | #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
6398 | case CXGB4_TLSDEV_OPS: |
6399 | if (!adap->uld[CXGB4_ULD_KTLS].handle) { |
6400 | dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n" ); |
6401 | return -EOPNOTSUPP; |
6402 | } |
6403 | if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { |
6404 | dev_dbg(adap->pdev_dev, |
6405 | "ch_ktls driver has no registered tlsdev_ops\n" ); |
6406 | return -EOPNOTSUPP; |
6407 | } |
6408 | break; |
6409 | #endif /* CONFIG_CHELSIO_TLS_DEVICE */ |
6410 | #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
6411 | case CXGB4_XFRMDEV_OPS: |
6412 | if (!adap->uld[CXGB4_ULD_IPSEC].handle) { |
6413 | dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n" ); |
6414 | return -EOPNOTSUPP; |
6415 | } |
6416 | if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { |
6417 | dev_dbg(adap->pdev_dev, |
6418 | "chipsec driver has no registered xfrmdev_ops\n" ); |
6419 | return -EOPNOTSUPP; |
6420 | } |
6421 | break; |
6422 | #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ |
6423 | default: |
		dev_dbg(adap->pdev_dev,
			"driver has no support for offload %d\n", op_val);
6426 | return -EOPNOTSUPP; |
6427 | } |
6428 | |
6429 | return 0; |
6430 | } |
6431 | |
6432 | #endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */ |
6433 | |
6434 | #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
6435 | |
6436 | static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk, |
6437 | enum tls_offload_ctx_dir direction, |
6438 | struct tls_crypto_info *crypto_info, |
6439 | u32 tcp_sn) |
6440 | { |
	struct adapter *adap = netdev2adap(netdev);
6442 | int ret; |
6443 | |
6444 | mutex_lock(&uld_mutex); |
	ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
6446 | if (ret) |
6447 | goto out_unlock; |
6448 | |
	ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6450 | if (ret) |
6451 | goto out_unlock; |
6452 | |
6453 | ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, |
6454 | direction, |
6455 | crypto_info, |
6456 | tcp_sn); |
6457 | /* if there is a failure, clear the refcount */ |
6458 | if (ret) |
		cxgb4_set_ktls_feature(adap,
				       FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
	mutex_unlock(&uld_mutex);
6463 | return ret; |
6464 | } |
6465 | |
6466 | static void cxgb4_ktls_dev_del(struct net_device *netdev, |
6467 | struct tls_context *tls_ctx, |
6468 | enum tls_offload_ctx_dir direction) |
6469 | { |
	struct adapter *adap = netdev2adap(netdev);
6471 | |
6472 | mutex_lock(&uld_mutex); |
	if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
6474 | goto out_unlock; |
6475 | |
6476 | adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, |
6477 | direction); |
6478 | |
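	/* The feature disable below runs even when chcr_offload_state()
	 * fails, so the reference taken in cxgb4_ktls_dev_add() is always
	 * dropped, ULD present or not.
	 */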
out_unlock:
	cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
	mutex_unlock(&uld_mutex);
}

static const struct tlsdev_ops cxgb4_ktls_ops = {
	.tls_dev_add = cxgb4_ktls_dev_add,
	.tls_dev_del = cxgb4_ktls_dev_del,
};
#endif /* CONFIG_CHELSIO_TLS_DEVICE */

#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)

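/* The xfrmdev callbacks below use mutex_trylock() rather than
 * mutex_lock(): if uld_mutex is already held (for example while the
 * inline-IPsec ULD is attaching or detaching), they back off with -EBUSY
 * or a debug message instead of sleeping on the lock.
 */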
static int cxgb4_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct adapter *adap = netdev2adap(x->xso.dev);
	int ret;

	if (!mutex_trylock(&uld_mutex)) {
		NL_SET_ERR_MSG_MOD(extack, "crypto uld critical resource is under use");
		return -EBUSY;
	}
	ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
	if (ret)
		goto out_unlock;

	ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);

out_unlock:
	mutex_unlock(&uld_mutex);

	return ret;
}

static void cxgb4_xfrm_del_state(struct xfrm_state *x)
{
	struct adapter *adap = netdev2adap(x->xso.dev);

	if (!mutex_trylock(&uld_mutex)) {
		dev_dbg(adap->pdev_dev,
			"crypto uld critical resource is under use\n");
		return;
	}
	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
		goto out_unlock;

	adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);

out_unlock:
	mutex_unlock(&uld_mutex);
}

static void cxgb4_xfrm_free_state(struct xfrm_state *x)
{
	struct adapter *adap = netdev2adap(x->xso.dev);

	if (!mutex_trylock(&uld_mutex)) {
		dev_dbg(adap->pdev_dev,
			"crypto uld critical resource is under use\n");
		return;
	}
	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
		goto out_unlock;

	adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);

out_unlock:
	mutex_unlock(&uld_mutex);
}

static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	struct adapter *adap = netdev2adap(x->xso.dev);
	bool ret = false;

	if (!mutex_trylock(&uld_mutex)) {
		dev_dbg(adap->pdev_dev,
			"crypto uld critical resource is under use\n");
		return ret;
	}
	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
		goto out_unlock;

	ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);

out_unlock:
	mutex_unlock(&uld_mutex);
	return ret;
}

static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
	struct adapter *adap = netdev2adap(x->xso.dev);

	if (!mutex_trylock(&uld_mutex)) {
		dev_dbg(adap->pdev_dev,
			"crypto uld critical resource is under use\n");
		return;
	}
	if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
		goto out_unlock;

	adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);

out_unlock:
	mutex_unlock(&uld_mutex);
}

static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
	.xdo_dev_state_add = cxgb4_xfrm_add_state,
	.xdo_dev_state_delete = cxgb4_xfrm_del_state,
	.xdo_dev_state_free = cxgb4_xfrm_free_state,
	.xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};

#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

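/* init_one() - PCI probe routine.  Maps BAR0, allocates and initializes
 * the adapter structure, talks to the firmware (adap_init0), allocates
 * one net_device per port, sets up offload tables and interrupts, and
 * finally registers the net devices.  Only the management PF performs
 * full initialization; see the ent->driver_data check below.
 */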
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
	struct port_info *pi;
	enum chip_type chip;
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
	if ((int)chip < 0) {
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		err = chip;
		goto out_free_adapter;
	}
	chip_ver = CHELSIO_CHIP_VERSION(chip);
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	pci_set_drvdata(pdev, adapter);

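	/* Full initialization happens only on the PF named in the PCI ID
	 * table's driver_data.  Other PFs park here with their config
	 * space saved so that SR-IOV can be configured on them later.
	 */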
	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		return 0;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_free_adapter;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= CXGB4_DEV_ENABLED;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	/* If possible, we use the PCIe Relaxed Ordering Attribute when
	 * delivering Ingress Packet Data to Free List Buffers, to allow
	 * chipset performance optimizations between the Root Complex and
	 * the Memory Controllers.  (Messages to the associated Ingress
	 * Queue notifying new Packet Placement in the Free List Buffers
	 * are sent without the Relaxed Ordering Attribute, guaranteeing
	 * that all preceding PCIe Transaction Layer Packets are processed
	 * first.)  But some Root Complexes have issues with Upstream
	 * Transaction Layer Packets that have the Relaxed Ordering
	 * Attribute set; PCIe devices below such Root Complexes get the
	 * Relaxed Ordering bit cleared in their configuration space.  So
	 * we check our PCIe configuration space to see whether it's
	 * flagged with advice against using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
				 err);
			err = 0;
		}
	}

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is no more than the number of segments that can
		 * be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter, 0);
	if (err)
		goto out_unmap_bar;

	setup_memwin_rdma(adapter);

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);

	for_each_port(adapter, i) {
		/* To support MQPRIO offload we need some extra queues for
		 * each ETHOFLD TID.  Keep it equal to MAX_ATIDS for now;
		 * once we connect to the firmware later and query the EOTID
		 * parameters, we'll learn the actual number of EOTIDs
		 * supported.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS + MAX_ATIDS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
						   NETIF_F_GSO_UDP_TUNNEL_CSUM |
						   NETIF_F_TSO | NETIF_F_TSO6;

			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
					       NETIF_F_HW_TLS_RECORD;

			if (adapter->rawf_cnt)
				netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
		}

		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
			netdev->hw_features |= NETIF_F_HW_TLS_TX;
			netdev->tlsdev_ops = &cxgb4_ktls_ops;
			/* initialize the refcount */
			refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
		}
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
			netdev->hw_enc_features |= NETIF_F_HW_ESP;
			netdev->features |= NETIF_F_HW_ESP;
			netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
		}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;		/* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
		cxgb4_dcb_version_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & CXGB4_FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

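	/* Without a working firmware connection there is nothing more we
	 * can configure; skip queue, table, and interrupt setup and jump
	 * straight to registering the (debug) net devices.
	 */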
	if (!(adapter->flags & CXGB4_FW_OK))
		goto fw_attach_fail;

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    !(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F)) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

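	/* For hash-filter capable hardware, read the geometry of the
	 * hashed TID region from the LE_DB registers.  T5 and earlier
	 * encode the size and base differently from T6.
	 */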
	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 v;

			v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
			if (chip_ver <= CHELSIO_T5) {
				adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
				v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
				adapter->tids.hash_base = v / 4;
			} else {
				adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
				v = t4_read_reg(adapter,
						T6_LE_DB_HASH_TID_BASE_A);
				adapter->tids.hash_base = v;
			}
		}
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");

		if (cxgb4_init_tc_mqprio(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc mqprio, continuing\n");

		if (cxgb4_init_tc_matchall(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc matchall, continuing\n");
		if (cxgb4_init_ethtool_filters(adapter))
			dev_warn(&pdev->dev,
				 "could not initialize ethtool filters, continuing\n");
	}

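	/* Interrupt selection is driven by the "msi" module parameter:
	 * values above 1 try MSI-X first, 1 falls back to MSI, and if
	 * neither can be enabled the driver runs on the legacy line
	 * interrupt.
	 */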
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= CXGB4_USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= CXGB4_USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	cxgb4_init_mps_ref_entries(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_non_data_intr(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Non Data interrupt allocation failed, err: %d\n", err);
		goto out_free_dev;
	}

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter))
		cxgb4_uld_enable(adapter);

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
		cxgb4_thermal_init(adapter);

	print_adapter_info(adapter);
	return 0;

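	/* Error unwind: each label below releases the resources acquired
	 * after the previous one, so a failure at any point falls through
	 * the remaining labels in reverse order of acquisition.
	 */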
 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & CXGB4_USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	struct hash_mac_addr *entry, *tmp;

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	/* If we allocated filters, free up state associated with any
	 * valid filters ...
	 */
	clear_all_filters(adapter);

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		t4_uld_clean_up(adapter);

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		cxgb4_free_mps_ref_entries(adapter);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);
		if (IS_REACHABLE(CONFIG_THERMAL))
			cxgb4_thermal_remove(adapter);

		if (adapter->flags & CXGB4_FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & CXGB4_USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}

#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	if ((adapter->flags & CXGB4_DEV_ENABLED)) {
		pci_disable_device(pdev);
		adapter->flags &= ~CXGB4_DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}

7181 | /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt |
7182 | * delivery. This is essentially a stripped down version of the PCI remove() |
7183 | * function where we do the minimal amount of work necessary to shutdown any |
7184 | * further activity. |
7185 | */ |
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want
	 * to do cleanup on PCI Devices which went all the way through
	 * init_one() ...
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		rtnl_lock();
		cxgb4_mqprio_stop_offload(adapter);
		rtnl_unlock();

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & CXGB4_FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

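	/* No need to check the debugfs_create_dir() return value here:
	 * the debugfs API is designed so callers can ignore errors, and
	 * debugfs_remove() below accepts NULL and error pointers.
	 */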
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		goto err_pci;

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		if (ret)
			pci_unregister_driver(&cxgb4_driver);
		else
			inet6addr_registered = true;
	}
#endif

	if (ret == 0)
		return ret;

err_pci:
	debugfs_remove(cxgb4_debugfs_root);

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);