1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #include <linux/types.h> |
8 | #include <asm/byteorder.h> |
9 | #include <linux/dma-mapping.h> |
10 | #include <linux/if_vlan.h> |
11 | #include <linux/kernel.h> |
12 | #include <linux/pci.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/stddef.h> |
15 | #include <linux/workqueue.h> |
16 | #include <net/ipv6.h> |
17 | #include <linux/bitops.h> |
18 | #include <linux/delay.h> |
19 | #include <linux/errno.h> |
20 | #include <linux/etherdevice.h> |
21 | #include <linux/io.h> |
22 | #include <linux/list.h> |
23 | #include <linux/mutex.h> |
24 | #include <linux/spinlock.h> |
25 | #include <linux/string.h> |
26 | #include <linux/qed/qed_ll2_if.h> |
27 | #include "qed.h" |
28 | #include "qed_cxt.h" |
29 | #include "qed_dev_api.h" |
30 | #include "qed_hsi.h" |
31 | #include "qed_iro_hsi.h" |
32 | #include "qed_hw.h" |
33 | #include "qed_int.h" |
34 | #include "qed_ll2.h" |
35 | #include "qed_mcp.h" |
36 | #include "qed_ooo.h" |
37 | #include "qed_reg_addr.h" |
38 | #include "qed_sp.h" |
39 | #include "qed_rdma.h" |
40 | |
41 | #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered) |
42 | #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered) |
43 | |
44 | #define QED_LL2_TX_SIZE (256) |
45 | #define QED_LL2_RX_SIZE (4096) |
46 | |
47 | #define QED_LL2_INVALID_STATS_ID 0xff |
48 | |
49 | struct qed_cb_ll2_info { |
50 | int rx_cnt; |
51 | u32 rx_size; |
52 | u8 handle; |
53 | |
54 | /* Lock protecting LL2 buffer lists in sleepless context */ |
55 | spinlock_t lock; |
56 | struct list_head list; |
57 | |
58 | const struct qed_ll2_cb_ops *cbs; |
59 | void *cb_cookie; |
60 | }; |
61 | |
62 | struct qed_ll2_buffer { |
63 | struct list_head list; |
64 | void *data; |
65 | dma_addr_t phys_addr; |
66 | }; |
67 | |
68 | static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn, |
69 | u8 ll2_queue_type, u8 qid) |
70 | { |
71 | u8 stats_id; |
72 | |
73 | /* For legacy (RAM based) queues, the stats_id will be set as the |
74 | * queue_id. Otherwise (context based queue), it will be set to |
75 | * the "abs_pf_id" offset from the end of the RAM based queue IDs. |
76 | * If the final value exceeds the total counters amount, return |
77 | * INVALID value to indicate that the stats for this connection should |
78 | * be disabled. |
79 | */ |
80 | if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) |
81 | stats_id = qid; |
82 | else |
83 | stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id; |
84 | |
85 | if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS) |
86 | return stats_id; |
87 | else |
88 | return QED_LL2_INVALID_STATS_ID; |
89 | } |
90 | |
91 | static void qed_ll2b_complete_tx_packet(void *cxt, |
92 | u8 connection_handle, |
93 | void *cookie, |
94 | dma_addr_t first_frag_addr, |
95 | bool b_last_fragment, |
96 | bool b_last_packet) |
97 | { |
98 | struct qed_hwfn *p_hwfn = cxt; |
99 | struct qed_dev *cdev = p_hwfn->cdev; |
100 | struct sk_buff *skb = cookie; |
101 | |
102 | /* All we need to do is release the mapping */ |
103 | dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr, |
104 | skb_headlen(skb), DMA_TO_DEVICE); |
105 | |
106 | if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb) |
107 | cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, |
108 | b_last_fragment); |
109 | |
110 | dev_kfree_skb_any(skb); |
111 | } |
112 | |
113 | static int qed_ll2_alloc_buffer(struct qed_dev *cdev, |
114 | u8 **data, dma_addr_t *phys_addr) |
115 | { |
116 | size_t size = cdev->ll2->rx_size + NET_SKB_PAD + |
117 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
118 | |
119 | *data = kmalloc(size, GFP_ATOMIC); |
120 | if (!(*data)) { |
121 | DP_INFO(cdev, "Failed to allocate LL2 buffer data\n" ); |
122 | return -ENOMEM; |
123 | } |
124 | |
125 | *phys_addr = dma_map_single(&cdev->pdev->dev, |
126 | ((*data) + NET_SKB_PAD), |
127 | cdev->ll2->rx_size, DMA_FROM_DEVICE); |
128 | if (dma_mapping_error(dev: &cdev->pdev->dev, dma_addr: *phys_addr)) { |
129 | DP_INFO(cdev, "Failed to map LL2 buffer data\n" ); |
130 | kfree(objp: (*data)); |
131 | return -ENOMEM; |
132 | } |
133 | |
134 | return 0; |
135 | } |
136 | |
137 | static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, |
138 | struct qed_ll2_buffer *buffer) |
139 | { |
140 | spin_lock_bh(lock: &cdev->ll2->lock); |
141 | |
142 | dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, |
143 | cdev->ll2->rx_size, DMA_FROM_DEVICE); |
144 | kfree(objp: buffer->data); |
145 | list_del(entry: &buffer->list); |
146 | |
147 | cdev->ll2->rx_cnt--; |
148 | if (!cdev->ll2->rx_cnt) |
149 | DP_INFO(cdev, "All LL2 entries were removed\n" ); |
150 | |
151 | spin_unlock_bh(lock: &cdev->ll2->lock); |
152 | |
153 | return 0; |
154 | } |
155 | |
156 | static void qed_ll2_kill_buffers(struct qed_dev *cdev) |
157 | { |
158 | struct qed_ll2_buffer *buffer, *tmp_buffer; |
159 | |
160 | list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) |
161 | qed_ll2_dealloc_buffer(cdev, buffer); |
162 | } |
163 | |
164 | static void qed_ll2b_complete_rx_packet(void *cxt, |
165 | struct qed_ll2_comp_rx_data *data) |
166 | { |
167 | struct qed_hwfn *p_hwfn = cxt; |
168 | struct qed_ll2_buffer *buffer = data->cookie; |
169 | struct qed_dev *cdev = p_hwfn->cdev; |
170 | dma_addr_t new_phys_addr; |
171 | struct sk_buff *skb; |
172 | bool reuse = false; |
173 | int rc = -EINVAL; |
174 | u8 *new_data; |
175 | |
176 | DP_VERBOSE(p_hwfn, |
177 | (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), |
178 | "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n" , |
179 | (u64)data->rx_buf_addr, |
180 | data->u.placement_offset, |
181 | data->length.packet_length, |
182 | data->parse_flags, |
183 | data->vlan, data->opaque_data_0, data->opaque_data_1); |
184 | |
185 | if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { |
186 | print_hex_dump(KERN_INFO, prefix_str: "" , |
187 | prefix_type: DUMP_PREFIX_OFFSET, rowsize: 16, groupsize: 1, |
188 | buf: buffer->data, len: data->length.packet_length, ascii: false); |
189 | } |
190 | |
191 | /* Determine if data is valid */ |
192 | if (data->length.packet_length < ETH_HLEN) |
193 | reuse = true; |
194 | |
195 | /* Allocate a replacement for buffer; Reuse upon failure */ |
196 | if (!reuse) |
197 | rc = qed_ll2_alloc_buffer(cdev: p_hwfn->cdev, data: &new_data, |
198 | phys_addr: &new_phys_addr); |
199 | |
200 | /* If need to reuse or there's no replacement buffer, repost this */ |
201 | if (rc) |
202 | goto out_post; |
203 | dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, |
204 | cdev->ll2->rx_size, DMA_FROM_DEVICE); |
205 | |
206 | skb = slab_build_skb(data: buffer->data); |
207 | if (!skb) { |
208 | DP_INFO(cdev, "Failed to build SKB\n" ); |
209 | kfree(objp: buffer->data); |
210 | goto out_post1; |
211 | } |
212 | |
213 | data->u.placement_offset += NET_SKB_PAD; |
214 | skb_reserve(skb, len: data->u.placement_offset); |
215 | skb_put(skb, len: data->length.packet_length); |
216 | skb_checksum_none_assert(skb); |
217 | |
218 | /* Get parital ethernet information instead of eth_type_trans(), |
219 | * Since we don't have an associated net_device. |
220 | */ |
221 | skb_reset_mac_header(skb); |
222 | skb->protocol = eth_hdr(skb)->h_proto; |
223 | |
224 | /* Pass SKB onward */ |
225 | if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { |
226 | if (data->vlan) |
227 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
228 | vlan_tci: data->vlan); |
229 | cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, |
230 | data->opaque_data_0, |
231 | data->opaque_data_1); |
232 | } else { |
233 | DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA | |
234 | QED_MSG_LL2 | QED_MSG_STORAGE), |
235 | "Dropping the packet\n" ); |
236 | kfree(objp: buffer->data); |
237 | } |
238 | |
239 | out_post1: |
240 | /* Update Buffer information and update FW producer */ |
241 | buffer->data = new_data; |
242 | buffer->phys_addr = new_phys_addr; |
243 | |
244 | out_post: |
245 | rc = qed_ll2_post_rx_buffer(cxt: p_hwfn, connection_handle: cdev->ll2->handle, |
246 | addr: buffer->phys_addr, buf_len: 0, cookie: buffer, notify_fw: 1); |
247 | if (rc) |
248 | qed_ll2_dealloc_buffer(cdev, buffer); |
249 | } |
250 | |
251 | static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, |
252 | u8 connection_handle, |
253 | bool b_lock, |
254 | bool b_only_active) |
255 | { |
256 | struct qed_ll2_info *p_ll2_conn, *p_ret = NULL; |
257 | |
258 | if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) |
259 | return NULL; |
260 | |
261 | if (!p_hwfn->p_ll2_info) |
262 | return NULL; |
263 | |
264 | p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; |
265 | |
266 | if (b_only_active) { |
267 | if (b_lock) |
268 | mutex_lock(&p_ll2_conn->mutex); |
269 | if (p_ll2_conn->b_active) |
270 | p_ret = p_ll2_conn; |
271 | if (b_lock) |
272 | mutex_unlock(lock: &p_ll2_conn->mutex); |
273 | } else { |
274 | p_ret = p_ll2_conn; |
275 | } |
276 | |
277 | return p_ret; |
278 | } |
279 | |
280 | static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, |
281 | u8 connection_handle) |
282 | { |
283 | return __qed_ll2_handle_sanity(p_hwfn, connection_handle, b_lock: false, b_only_active: true); |
284 | } |
285 | |
286 | static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn, |
287 | u8 connection_handle) |
288 | { |
289 | return __qed_ll2_handle_sanity(p_hwfn, connection_handle, b_lock: true, b_only_active: true); |
290 | } |
291 | |
292 | static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn |
293 | *p_hwfn, |
294 | u8 connection_handle) |
295 | { |
296 | return __qed_ll2_handle_sanity(p_hwfn, connection_handle, b_lock: false, b_only_active: false); |
297 | } |
298 | |
299 | static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) |
300 | { |
301 | bool b_last_packet = false, b_last_frag = false; |
302 | struct qed_ll2_tx_packet *p_pkt = NULL; |
303 | struct qed_ll2_info *p_ll2_conn; |
304 | struct qed_ll2_tx_queue *p_tx; |
305 | unsigned long flags = 0; |
306 | dma_addr_t tx_frag; |
307 | |
308 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); |
309 | if (!p_ll2_conn) |
310 | return; |
311 | |
312 | p_tx = &p_ll2_conn->tx_queue; |
313 | |
314 | spin_lock_irqsave(&p_tx->lock, flags); |
315 | while (!list_empty(head: &p_tx->active_descq)) { |
316 | p_pkt = list_first_entry(&p_tx->active_descq, |
317 | struct qed_ll2_tx_packet, list_entry); |
318 | if (!p_pkt) |
319 | break; |
320 | |
321 | list_del(entry: &p_pkt->list_entry); |
322 | b_last_packet = list_empty(head: &p_tx->active_descq); |
323 | list_add_tail(new: &p_pkt->list_entry, head: &p_tx->free_descq); |
324 | spin_unlock_irqrestore(lock: &p_tx->lock, flags); |
325 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { |
326 | struct qed_ooo_buffer *p_buffer; |
327 | |
328 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; |
329 | qed_ooo_put_free_buffer(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, |
330 | p_buffer); |
331 | } else { |
332 | p_tx->cur_completing_packet = *p_pkt; |
333 | p_tx->cur_completing_bd_idx = 1; |
334 | b_last_frag = |
335 | p_tx->cur_completing_bd_idx == p_pkt->bd_used; |
336 | tx_frag = p_pkt->bds_set[0].tx_frag; |
337 | p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, |
338 | p_ll2_conn->my_id, |
339 | p_pkt->cookie, |
340 | tx_frag, |
341 | b_last_frag, |
342 | b_last_packet); |
343 | } |
344 | spin_lock_irqsave(&p_tx->lock, flags); |
345 | } |
346 | spin_unlock_irqrestore(lock: &p_tx->lock, flags); |
347 | } |
348 | |
349 | static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) |
350 | { |
351 | struct qed_ll2_info *p_ll2_conn = p_cookie; |
352 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; |
353 | u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0; |
354 | struct qed_ll2_tx_packet *p_pkt; |
355 | bool b_last_frag = false; |
356 | unsigned long flags; |
357 | int rc = -EINVAL; |
358 | |
359 | if (!p_ll2_conn) |
360 | return rc; |
361 | |
362 | spin_lock_irqsave(&p_tx->lock, flags); |
363 | if (p_tx->b_completing_packet) { |
364 | rc = -EBUSY; |
365 | goto out; |
366 | } |
367 | |
368 | new_idx = le16_to_cpu(*p_tx->p_fw_cons); |
369 | num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); |
370 | while (num_bds) { |
371 | if (list_empty(head: &p_tx->active_descq)) |
372 | goto out; |
373 | |
374 | p_pkt = list_first_entry(&p_tx->active_descq, |
375 | struct qed_ll2_tx_packet, list_entry); |
376 | if (!p_pkt) |
377 | goto out; |
378 | |
379 | p_tx->b_completing_packet = true; |
380 | p_tx->cur_completing_packet = *p_pkt; |
381 | num_bds_in_packet = p_pkt->bd_used; |
382 | list_del(entry: &p_pkt->list_entry); |
383 | |
384 | if (unlikely(num_bds < num_bds_in_packet)) { |
385 | DP_NOTICE(p_hwfn, |
386 | "Rest of BDs does not cover whole packet\n" ); |
387 | goto out; |
388 | } |
389 | |
390 | num_bds -= num_bds_in_packet; |
391 | p_tx->bds_idx += num_bds_in_packet; |
392 | while (num_bds_in_packet--) |
393 | qed_chain_consume(p_chain: &p_tx->txq_chain); |
394 | |
395 | p_tx->cur_completing_bd_idx = 1; |
396 | b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; |
397 | list_add_tail(new: &p_pkt->list_entry, head: &p_tx->free_descq); |
398 | |
399 | spin_unlock_irqrestore(lock: &p_tx->lock, flags); |
400 | |
401 | p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, |
402 | p_ll2_conn->my_id, |
403 | p_pkt->cookie, |
404 | p_pkt->bds_set[0].tx_frag, |
405 | b_last_frag, !num_bds); |
406 | |
407 | spin_lock_irqsave(&p_tx->lock, flags); |
408 | } |
409 | |
410 | p_tx->b_completing_packet = false; |
411 | rc = 0; |
412 | out: |
413 | spin_unlock_irqrestore(lock: &p_tx->lock, flags); |
414 | return rc; |
415 | } |
416 | |
417 | static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, |
418 | union core_rx_cqe_union *p_cqe, |
419 | struct qed_ll2_comp_rx_data *data) |
420 | { |
421 | data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); |
422 | data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); |
423 | data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); |
424 | data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); |
425 | data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); |
426 | data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; |
427 | data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id); |
428 | |
429 | data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp); |
430 | } |
431 | |
432 | static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, |
433 | union core_rx_cqe_union *p_cqe, |
434 | struct qed_ll2_comp_rx_data *data) |
435 | { |
436 | data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); |
437 | data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags); |
438 | data->length.packet_length = |
439 | le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); |
440 | data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); |
441 | data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]); |
442 | data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]); |
443 | data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; |
444 | } |
445 | |
446 | static int |
447 | qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn, |
448 | struct qed_ll2_info *p_ll2_conn, |
449 | union core_rx_cqe_union *p_cqe, |
450 | unsigned long *p_lock_flags) |
451 | { |
452 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; |
453 | struct core_rx_slow_path_cqe *sp_cqe; |
454 | |
455 | sp_cqe = &p_cqe->rx_cqe_sp; |
456 | if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) { |
457 | DP_NOTICE(p_hwfn, |
458 | "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n" , |
459 | sp_cqe->ramrod_cmd_id); |
460 | return -EINVAL; |
461 | } |
462 | |
463 | if (!p_ll2_conn->cbs.slowpath_cb) { |
464 | DP_NOTICE(p_hwfn, |
465 | "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n" ); |
466 | return -EINVAL; |
467 | } |
468 | |
469 | spin_unlock_irqrestore(lock: &p_rx->lock, flags: *p_lock_flags); |
470 | |
471 | p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, |
472 | p_ll2_conn->my_id, |
473 | le32_to_cpu(sp_cqe->opaque_data.data[0]), |
474 | le32_to_cpu(sp_cqe->opaque_data.data[1])); |
475 | |
476 | spin_lock_irqsave(&p_rx->lock, *p_lock_flags); |
477 | |
478 | return 0; |
479 | } |
480 | |
481 | static int |
482 | qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, |
483 | struct qed_ll2_info *p_ll2_conn, |
484 | union core_rx_cqe_union *p_cqe, |
485 | unsigned long *p_lock_flags, bool b_last_cqe) |
486 | { |
487 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; |
488 | struct qed_ll2_rx_packet *p_pkt = NULL; |
489 | struct qed_ll2_comp_rx_data data; |
490 | |
491 | if (!list_empty(head: &p_rx->active_descq)) |
492 | p_pkt = list_first_entry(&p_rx->active_descq, |
493 | struct qed_ll2_rx_packet, list_entry); |
494 | if (unlikely(!p_pkt)) { |
495 | DP_NOTICE(p_hwfn, |
496 | "[%d] LL2 Rx completion but active_descq is empty\n" , |
497 | p_ll2_conn->input.conn_type); |
498 | |
499 | return -EIO; |
500 | } |
501 | list_del(entry: &p_pkt->list_entry); |
502 | |
503 | if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR) |
504 | qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, data: &data); |
505 | else |
506 | qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, data: &data); |
507 | if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)) |
508 | DP_NOTICE(p_hwfn, |
509 | "Mismatch between active_descq and the LL2 Rx chain\n" ); |
510 | |
511 | list_add_tail(new: &p_pkt->list_entry, head: &p_rx->free_descq); |
512 | |
513 | data.connection_handle = p_ll2_conn->my_id; |
514 | data.cookie = p_pkt->cookie; |
515 | data.rx_buf_addr = p_pkt->rx_buf_addr; |
516 | data.b_last_packet = b_last_cqe; |
517 | |
518 | spin_unlock_irqrestore(lock: &p_rx->lock, flags: *p_lock_flags); |
519 | p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data); |
520 | |
521 | spin_lock_irqsave(&p_rx->lock, *p_lock_flags); |
522 | |
523 | return 0; |
524 | } |
525 | |
526 | static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) |
527 | { |
528 | struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie; |
529 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; |
530 | union core_rx_cqe_union *cqe = NULL; |
531 | u16 cq_new_idx = 0, cq_old_idx = 0; |
532 | unsigned long flags = 0; |
533 | int rc = 0; |
534 | |
535 | if (!p_ll2_conn) |
536 | return rc; |
537 | |
538 | spin_lock_irqsave(&p_rx->lock, flags); |
539 | |
540 | if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) { |
541 | spin_unlock_irqrestore(lock: &p_rx->lock, flags); |
542 | return 0; |
543 | } |
544 | |
545 | cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); |
546 | cq_old_idx = qed_chain_get_cons_idx(chain: &p_rx->rcq_chain); |
547 | |
548 | while (cq_new_idx != cq_old_idx) { |
549 | bool b_last_cqe = (cq_new_idx == cq_old_idx); |
550 | |
551 | cqe = |
552 | (union core_rx_cqe_union *) |
553 | qed_chain_consume(p_chain: &p_rx->rcq_chain); |
554 | cq_old_idx = qed_chain_get_cons_idx(chain: &p_rx->rcq_chain); |
555 | |
556 | DP_VERBOSE(p_hwfn, |
557 | QED_MSG_LL2, |
558 | "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n" , |
559 | cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type); |
560 | |
561 | switch (cqe->rx_cqe_sp.type) { |
562 | case CORE_RX_CQE_TYPE_SLOW_PATH: |
563 | rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn, |
564 | p_cqe: cqe, p_lock_flags: &flags); |
565 | break; |
566 | case CORE_RX_CQE_TYPE_GSI_OFFLOAD: |
567 | case CORE_RX_CQE_TYPE_REGULAR: |
568 | rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, |
569 | p_cqe: cqe, p_lock_flags: &flags, |
570 | b_last_cqe); |
571 | break; |
572 | default: |
573 | rc = -EIO; |
574 | } |
575 | } |
576 | |
577 | spin_unlock_irqrestore(lock: &p_rx->lock, flags); |
578 | return rc; |
579 | } |
580 | |
581 | static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) |
582 | { |
583 | struct qed_ll2_info *p_ll2_conn = NULL; |
584 | struct qed_ll2_rx_packet *p_pkt = NULL; |
585 | struct qed_ll2_rx_queue *p_rx; |
586 | unsigned long flags = 0; |
587 | |
588 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); |
589 | if (!p_ll2_conn) |
590 | return; |
591 | |
592 | p_rx = &p_ll2_conn->rx_queue; |
593 | |
594 | spin_lock_irqsave(&p_rx->lock, flags); |
595 | while (!list_empty(head: &p_rx->active_descq)) { |
596 | p_pkt = list_first_entry(&p_rx->active_descq, |
597 | struct qed_ll2_rx_packet, list_entry); |
598 | if (!p_pkt) |
599 | break; |
600 | list_move_tail(list: &p_pkt->list_entry, head: &p_rx->free_descq); |
601 | spin_unlock_irqrestore(lock: &p_rx->lock, flags); |
602 | |
603 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { |
604 | struct qed_ooo_buffer *p_buffer; |
605 | |
606 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; |
607 | qed_ooo_put_free_buffer(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, |
608 | p_buffer); |
609 | } else { |
610 | dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr; |
611 | void *cookie = p_pkt->cookie; |
612 | bool b_last; |
613 | |
614 | b_last = list_empty(head: &p_rx->active_descq); |
615 | p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie, |
616 | p_ll2_conn->my_id, |
617 | cookie, |
618 | rx_buf_addr, b_last); |
619 | } |
620 | spin_lock_irqsave(&p_rx->lock, flags); |
621 | } |
622 | spin_unlock_irqrestore(lock: &p_rx->lock, flags); |
623 | } |
624 | |
625 | static bool |
626 | qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, |
627 | struct core_rx_slow_path_cqe *p_cqe) |
628 | { |
629 | struct ooo_opaque *ooo_opq; |
630 | u32 cid; |
631 | |
632 | if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) |
633 | return false; |
634 | |
635 | ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data; |
636 | if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES) |
637 | return false; |
638 | |
639 | /* Need to make a flush */ |
640 | cid = le32_to_cpu(ooo_opq->cid); |
641 | qed_ooo_release_connection_isles(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, cid); |
642 | |
643 | return true; |
644 | } |
645 | |
646 | static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, |
647 | struct qed_ll2_info *p_ll2_conn) |
648 | { |
649 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; |
650 | u16 packet_length = 0, parse_flags = 0, vlan = 0; |
651 | struct qed_ll2_rx_packet *p_pkt = NULL; |
652 | union core_rx_cqe_union *cqe = NULL; |
653 | u16 cq_new_idx = 0, cq_old_idx = 0; |
654 | struct qed_ooo_buffer *p_buffer; |
655 | struct ooo_opaque *ooo_opq; |
656 | u8 placement_offset = 0; |
657 | u8 cqe_type; |
658 | u32 cid; |
659 | |
660 | cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); |
661 | cq_old_idx = qed_chain_get_cons_idx(chain: &p_rx->rcq_chain); |
662 | if (cq_new_idx == cq_old_idx) |
663 | return 0; |
664 | |
665 | while (cq_new_idx != cq_old_idx) { |
666 | struct core_rx_fast_path_cqe *p_cqe_fp; |
667 | |
668 | cqe = qed_chain_consume(p_chain: &p_rx->rcq_chain); |
669 | cq_old_idx = qed_chain_get_cons_idx(chain: &p_rx->rcq_chain); |
670 | cqe_type = cqe->rx_cqe_sp.type; |
671 | |
672 | if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) |
673 | if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, |
674 | p_cqe: &cqe->rx_cqe_sp)) |
675 | continue; |
676 | |
677 | if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) { |
678 | DP_NOTICE(p_hwfn, |
679 | "Got a non-regular LB LL2 completion [type 0x%02x]\n" , |
680 | cqe_type); |
681 | return -EINVAL; |
682 | } |
683 | p_cqe_fp = &cqe->rx_cqe_fp; |
684 | |
685 | placement_offset = p_cqe_fp->placement_offset; |
686 | parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); |
687 | packet_length = le16_to_cpu(p_cqe_fp->packet_length); |
688 | vlan = le16_to_cpu(p_cqe_fp->vlan); |
689 | ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data; |
690 | qed_ooo_save_history_entry(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, p_cqe: ooo_opq); |
691 | cid = le32_to_cpu(ooo_opq->cid); |
692 | |
693 | /* Process delete isle first */ |
694 | if (ooo_opq->drop_size) |
695 | qed_ooo_delete_isles(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, cid, |
696 | drop_isle: ooo_opq->drop_isle, |
697 | drop_size: ooo_opq->drop_size); |
698 | |
699 | if (ooo_opq->ooo_opcode == TCP_EVENT_NOP) |
700 | continue; |
701 | |
702 | /* Now process create/add/join isles */ |
703 | if (unlikely(list_empty(&p_rx->active_descq))) { |
704 | DP_NOTICE(p_hwfn, |
705 | "LL2 OOO RX chain has no submitted buffers\n" |
706 | ); |
707 | return -EIO; |
708 | } |
709 | |
710 | p_pkt = list_first_entry(&p_rx->active_descq, |
711 | struct qed_ll2_rx_packet, list_entry); |
712 | |
713 | if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE || |
714 | ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT || |
715 | ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT || |
716 | ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN || |
717 | ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) { |
718 | if (unlikely(!p_pkt)) { |
719 | DP_NOTICE(p_hwfn, |
720 | "LL2 OOO RX packet is not valid\n" ); |
721 | return -EIO; |
722 | } |
723 | list_del(entry: &p_pkt->list_entry); |
724 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; |
725 | p_buffer->packet_length = packet_length; |
726 | p_buffer->parse_flags = parse_flags; |
727 | p_buffer->vlan = vlan; |
728 | p_buffer->placement_offset = placement_offset; |
729 | qed_chain_consume(p_chain: &p_rx->rxq_chain); |
730 | list_add_tail(new: &p_pkt->list_entry, head: &p_rx->free_descq); |
731 | |
732 | switch (ooo_opq->ooo_opcode) { |
733 | case TCP_EVENT_ADD_NEW_ISLE: |
734 | qed_ooo_add_new_isle(p_hwfn, |
735 | p_ooo_info: p_hwfn->p_ooo_info, |
736 | cid, |
737 | ooo_isle: ooo_opq->ooo_isle, |
738 | p_buffer); |
739 | break; |
740 | case TCP_EVENT_ADD_ISLE_RIGHT: |
741 | qed_ooo_add_new_buffer(p_hwfn, |
742 | p_ooo_info: p_hwfn->p_ooo_info, |
743 | cid, |
744 | ooo_isle: ooo_opq->ooo_isle, |
745 | p_buffer, |
746 | QED_OOO_RIGHT_BUF); |
747 | break; |
748 | case TCP_EVENT_ADD_ISLE_LEFT: |
749 | qed_ooo_add_new_buffer(p_hwfn, |
750 | p_ooo_info: p_hwfn->p_ooo_info, |
751 | cid, |
752 | ooo_isle: ooo_opq->ooo_isle, |
753 | p_buffer, |
754 | QED_OOO_LEFT_BUF); |
755 | break; |
756 | case TCP_EVENT_JOIN: |
757 | qed_ooo_add_new_buffer(p_hwfn, |
758 | p_ooo_info: p_hwfn->p_ooo_info, |
759 | cid, |
760 | ooo_isle: ooo_opq->ooo_isle + 1, |
761 | p_buffer, |
762 | QED_OOO_LEFT_BUF); |
763 | qed_ooo_join_isles(p_hwfn, |
764 | p_ooo_info: p_hwfn->p_ooo_info, |
765 | cid, left_isle: ooo_opq->ooo_isle); |
766 | break; |
767 | case TCP_EVENT_ADD_PEN: |
768 | qed_ooo_put_ready_buffer(p_hwfn, |
769 | p_ooo_info: p_hwfn->p_ooo_info, |
770 | p_buffer, on_tail: true); |
771 | break; |
772 | } |
773 | } else { |
774 | DP_NOTICE(p_hwfn, |
775 | "Unexpected event (%d) TX OOO completion\n" , |
776 | ooo_opq->ooo_opcode); |
777 | } |
778 | } |
779 | |
780 | return 0; |
781 | } |
782 | |
783 | static void |
784 | qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, |
785 | struct qed_ll2_info *p_ll2_conn) |
786 | { |
787 | struct qed_ll2_tx_pkt_info tx_pkt; |
788 | struct qed_ooo_buffer *p_buffer; |
789 | u16 l4_hdr_offset_w; |
790 | dma_addr_t first_frag; |
791 | u8 bd_flags; |
792 | int rc; |
793 | |
794 | /* Submit Tx buffers here */ |
795 | while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, |
796 | p_ooo_info: p_hwfn->p_ooo_info))) { |
797 | l4_hdr_offset_w = 0; |
798 | bd_flags = 0; |
799 | |
800 | first_frag = p_buffer->rx_buffer_phys_addr + |
801 | p_buffer->placement_offset; |
802 | SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); |
803 | SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); |
804 | |
805 | memset(&tx_pkt, 0, sizeof(tx_pkt)); |
806 | tx_pkt.num_of_bds = 1; |
807 | tx_pkt.vlan = p_buffer->vlan; |
808 | tx_pkt.bd_flags = bd_flags; |
809 | tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; |
810 | switch (p_ll2_conn->tx_dest) { |
811 | case CORE_TX_DEST_NW: |
812 | tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; |
813 | break; |
814 | case CORE_TX_DEST_LB: |
815 | tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; |
816 | break; |
817 | case CORE_TX_DEST_DROP: |
818 | default: |
819 | tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; |
820 | break; |
821 | } |
822 | tx_pkt.first_frag = first_frag; |
823 | tx_pkt.first_frag_len = p_buffer->packet_length; |
824 | tx_pkt.cookie = p_buffer; |
825 | |
826 | rc = qed_ll2_prepare_tx_packet(cxt: p_hwfn, connection_handle: p_ll2_conn->my_id, |
827 | pkt: &tx_pkt, notify_fw: true); |
828 | if (rc) { |
829 | qed_ooo_put_ready_buffer(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, |
830 | p_buffer, on_tail: false); |
831 | break; |
832 | } |
833 | } |
834 | } |
835 | |
836 | static void |
837 | qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, |
838 | struct qed_ll2_info *p_ll2_conn) |
839 | { |
840 | struct qed_ooo_buffer *p_buffer; |
841 | int rc; |
842 | |
843 | while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, |
844 | p_ooo_info: p_hwfn->p_ooo_info))) { |
845 | rc = qed_ll2_post_rx_buffer(cxt: p_hwfn, |
846 | connection_handle: p_ll2_conn->my_id, |
847 | addr: p_buffer->rx_buffer_phys_addr, |
848 | buf_len: 0, cookie: p_buffer, notify_fw: true); |
849 | if (rc) { |
850 | qed_ooo_put_free_buffer(p_hwfn, |
851 | p_ooo_info: p_hwfn->p_ooo_info, p_buffer); |
852 | break; |
853 | } |
854 | } |
855 | } |
856 | |
857 | static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) |
858 | { |
859 | struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; |
860 | int rc; |
861 | |
862 | if (!p_ll2_conn) |
863 | return 0; |
864 | |
865 | if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) |
866 | return 0; |
867 | |
868 | rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); |
869 | if (rc) |
870 | return rc; |
871 | |
872 | qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); |
873 | qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); |
874 | |
875 | return 0; |
876 | } |
877 | |
878 | static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) |
879 | { |
880 | struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; |
881 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; |
882 | struct qed_ll2_tx_packet *p_pkt = NULL; |
883 | struct qed_ooo_buffer *p_buffer; |
884 | bool b_dont_submit_rx = false; |
885 | u16 new_idx = 0, num_bds = 0; |
886 | int rc; |
887 | |
888 | if (unlikely(!p_ll2_conn)) |
889 | return 0; |
890 | |
891 | if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn))) |
892 | return 0; |
893 | |
894 | new_idx = le16_to_cpu(*p_tx->p_fw_cons); |
895 | num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); |
896 | |
897 | if (unlikely(!num_bds)) |
898 | return 0; |
899 | |
900 | while (num_bds) { |
901 | if (list_empty(head: &p_tx->active_descq)) |
902 | return -EINVAL; |
903 | |
904 | p_pkt = list_first_entry(&p_tx->active_descq, |
905 | struct qed_ll2_tx_packet, list_entry); |
906 | if (unlikely(!p_pkt)) |
907 | return -EINVAL; |
908 | |
909 | if (unlikely(p_pkt->bd_used != 1)) { |
910 | DP_NOTICE(p_hwfn, |
911 | "Unexpectedly many BDs(%d) in TX OOO completion\n" , |
912 | p_pkt->bd_used); |
913 | return -EINVAL; |
914 | } |
915 | |
916 | list_del(entry: &p_pkt->list_entry); |
917 | |
918 | num_bds--; |
919 | p_tx->bds_idx++; |
920 | qed_chain_consume(p_chain: &p_tx->txq_chain); |
921 | |
922 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; |
923 | list_add_tail(new: &p_pkt->list_entry, head: &p_tx->free_descq); |
924 | |
925 | if (b_dont_submit_rx) { |
926 | qed_ooo_put_free_buffer(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, |
927 | p_buffer); |
928 | continue; |
929 | } |
930 | |
931 | rc = qed_ll2_post_rx_buffer(cxt: p_hwfn, connection_handle: p_ll2_conn->my_id, |
932 | addr: p_buffer->rx_buffer_phys_addr, buf_len: 0, |
933 | cookie: p_buffer, notify_fw: true); |
934 | if (rc != 0) { |
935 | qed_ooo_put_free_buffer(p_hwfn, |
936 | p_ooo_info: p_hwfn->p_ooo_info, p_buffer); |
937 | b_dont_submit_rx = true; |
938 | } |
939 | } |
940 | |
941 | qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); |
942 | |
943 | return 0; |
944 | } |
945 | |
946 | static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn) |
947 | { |
948 | u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; |
949 | |
950 | DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2), |
951 | "Stopping LL2 OOO queue [%02x]\n" , *handle); |
952 | |
953 | qed_ll2_terminate_connection(cxt: p_hwfn, connection_handle: *handle); |
954 | qed_ll2_release_connection(cxt: p_hwfn, connection_handle: *handle); |
955 | *handle = QED_LL2_UNUSED_HANDLE; |
956 | } |
957 | |
958 | static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, |
959 | struct qed_ll2_info *p_ll2_conn, |
960 | u8 action_on_error) |
961 | { |
962 | enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; |
963 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; |
964 | struct core_rx_start_ramrod_data *p_ramrod = NULL; |
965 | struct qed_spq_entry *p_ent = NULL; |
966 | struct qed_sp_init_data init_data; |
967 | u16 cqe_pbl_size; |
968 | int rc = 0; |
969 | |
970 | /* Get SPQ entry */ |
971 | memset(&init_data, 0, sizeof(init_data)); |
972 | init_data.cid = p_ll2_conn->cid; |
973 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
974 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
975 | |
976 | rc = qed_sp_init_request(p_hwfn, pp_ent: &p_ent, |
977 | cmd: CORE_RAMROD_RX_QUEUE_START, |
978 | protocol: PROTOCOLID_CORE, p_data: &init_data); |
979 | if (rc) |
980 | return rc; |
981 | |
982 | p_ramrod = &p_ent->ramrod.core_rx_queue_start; |
983 | memset(p_ramrod, 0, sizeof(*p_ramrod)); |
984 | p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); |
985 | p_ramrod->sb_index = p_rx->rx_sb_index; |
986 | p_ramrod->complete_event_flg = 1; |
987 | |
988 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); |
989 | DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); |
990 | cqe_pbl_size = (u16)qed_chain_get_page_cnt(chain: &p_rx->rcq_chain); |
991 | p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); |
992 | DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, |
993 | qed_chain_get_pbl_phys(&p_rx->rcq_chain)); |
994 | |
995 | p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; |
996 | p_ramrod->inner_vlan_stripping_en = |
997 | p_ll2_conn->input.rx_vlan_removal_en; |
998 | |
999 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && |
1000 | p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) |
1001 | p_ramrod->report_outer_vlan = 1; |
1002 | p_ramrod->queue_id = p_ll2_conn->queue_id; |
1003 | p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; |
1004 | |
1005 | if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && |
1006 | p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && |
1007 | conn_type != QED_LL2_TYPE_IWARP && |
1008 | (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) { |
1009 | p_ramrod->mf_si_bcast_accept_all = 1; |
1010 | p_ramrod->mf_si_mcast_accept_all = 1; |
1011 | } else { |
1012 | p_ramrod->mf_si_bcast_accept_all = 0; |
1013 | p_ramrod->mf_si_mcast_accept_all = 0; |
1014 | } |
1015 | |
1016 | p_ramrod->action_on_error.error_type = action_on_error; |
1017 | p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; |
1018 | p_ramrod->zero_prod_flg = 1; |
1019 | |
1020 | return qed_spq_post(p_hwfn, p_ent, NULL); |
1021 | } |
1022 | |
1023 | static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, |
1024 | struct qed_ll2_info *p_ll2_conn) |
1025 | { |
1026 | enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; |
1027 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; |
1028 | struct core_tx_start_ramrod_data *p_ramrod = NULL; |
1029 | struct qed_spq_entry *p_ent = NULL; |
1030 | struct qed_sp_init_data init_data; |
1031 | u16 pq_id = 0, pbl_size; |
1032 | int rc = -EINVAL; |
1033 | |
1034 | if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) |
1035 | return 0; |
1036 | |
1037 | if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)) |
1038 | p_ll2_conn->tx_stats_en = 0; |
1039 | else |
1040 | p_ll2_conn->tx_stats_en = 1; |
1041 | |
1042 | /* Get SPQ entry */ |
1043 | memset(&init_data, 0, sizeof(init_data)); |
1044 | init_data.cid = p_ll2_conn->cid; |
1045 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
1046 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1047 | |
1048 | rc = qed_sp_init_request(p_hwfn, pp_ent: &p_ent, |
1049 | cmd: CORE_RAMROD_TX_QUEUE_START, |
1050 | protocol: PROTOCOLID_CORE, p_data: &init_data); |
1051 | if (rc) |
1052 | return rc; |
1053 | |
1054 | p_ramrod = &p_ent->ramrod.core_tx_queue_start; |
1055 | |
1056 | p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); |
1057 | p_ramrod->sb_index = p_tx->tx_sb_index; |
1058 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); |
1059 | p_ramrod->stats_en = p_ll2_conn->tx_stats_en; |
1060 | p_ramrod->stats_id = p_ll2_conn->tx_stats_id; |
1061 | |
1062 | DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, |
1063 | qed_chain_get_pbl_phys(&p_tx->txq_chain)); |
1064 | pbl_size = qed_chain_get_page_cnt(chain: &p_tx->txq_chain); |
1065 | p_ramrod->pbl_size = cpu_to_le16(pbl_size); |
1066 | |
1067 | switch (p_ll2_conn->input.tx_tc) { |
1068 | case PURE_LB_TC: |
1069 | pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); |
1070 | break; |
1071 | case PKT_LB_TC: |
1072 | pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); |
1073 | break; |
1074 | default: |
1075 | pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); |
1076 | break; |
1077 | } |
1078 | |
1079 | p_ramrod->qm_pq_id = cpu_to_le16(pq_id); |
1080 | |
1081 | switch (conn_type) { |
1082 | case QED_LL2_TYPE_FCOE: |
1083 | p_ramrod->conn_type = PROTOCOLID_FCOE; |
1084 | break; |
1085 | case QED_LL2_TYPE_TCP_ULP: |
1086 | p_ramrod->conn_type = PROTOCOLID_TCP_ULP; |
1087 | break; |
1088 | case QED_LL2_TYPE_ROCE: |
1089 | p_ramrod->conn_type = PROTOCOLID_ROCE; |
1090 | break; |
1091 | case QED_LL2_TYPE_IWARP: |
1092 | p_ramrod->conn_type = PROTOCOLID_IWARP; |
1093 | break; |
1094 | case QED_LL2_TYPE_OOO: |
1095 | if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || |
1096 | p_hwfn->hw_info.personality == QED_PCI_NVMETCP) |
1097 | p_ramrod->conn_type = PROTOCOLID_TCP_ULP; |
1098 | else |
1099 | p_ramrod->conn_type = PROTOCOLID_IWARP; |
1100 | break; |
1101 | default: |
1102 | p_ramrod->conn_type = PROTOCOLID_ETH; |
1103 | DP_NOTICE(p_hwfn, "Unknown connection type: %d\n" , conn_type); |
1104 | } |
1105 | |
1106 | p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; |
1107 | |
1108 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
1109 | if (rc) |
1110 | return rc; |
1111 | |
1112 | rc = qed_db_recovery_add(cdev: p_hwfn->cdev, db_addr: p_tx->doorbell_addr, |
1113 | db_data: &p_tx->db_msg, db_width: DB_REC_WIDTH_32B, |
1114 | db_space: DB_REC_KERNEL); |
1115 | return rc; |
1116 | } |
1117 | |
1118 | static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, |
1119 | struct qed_ll2_info *p_ll2_conn) |
1120 | { |
1121 | struct core_rx_stop_ramrod_data *p_ramrod = NULL; |
1122 | struct qed_spq_entry *p_ent = NULL; |
1123 | struct qed_sp_init_data init_data; |
1124 | int rc = -EINVAL; |
1125 | |
1126 | /* Get SPQ entry */ |
1127 | memset(&init_data, 0, sizeof(init_data)); |
1128 | init_data.cid = p_ll2_conn->cid; |
1129 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
1130 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1131 | |
1132 | rc = qed_sp_init_request(p_hwfn, pp_ent: &p_ent, |
1133 | cmd: CORE_RAMROD_RX_QUEUE_STOP, |
1134 | protocol: PROTOCOLID_CORE, p_data: &init_data); |
1135 | if (rc) |
1136 | return rc; |
1137 | |
1138 | p_ramrod = &p_ent->ramrod.core_rx_queue_stop; |
1139 | |
1140 | p_ramrod->complete_event_flg = 1; |
1141 | p_ramrod->queue_id = p_ll2_conn->queue_id; |
1142 | |
1143 | return qed_spq_post(p_hwfn, p_ent, NULL); |
1144 | } |
1145 | |
1146 | static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, |
1147 | struct qed_ll2_info *p_ll2_conn) |
1148 | { |
1149 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; |
1150 | struct qed_spq_entry *p_ent = NULL; |
1151 | struct qed_sp_init_data init_data; |
1152 | int rc = -EINVAL; |
1153 | |
1154 | qed_db_recovery_del(cdev: p_hwfn->cdev, db_addr: p_tx->doorbell_addr, db_data: &p_tx->db_msg); |
1155 | |
1156 | /* Get SPQ entry */ |
1157 | memset(&init_data, 0, sizeof(init_data)); |
1158 | init_data.cid = p_ll2_conn->cid; |
1159 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
1160 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1161 | |
1162 | rc = qed_sp_init_request(p_hwfn, pp_ent: &p_ent, |
1163 | cmd: CORE_RAMROD_TX_QUEUE_STOP, |
1164 | protocol: PROTOCOLID_CORE, p_data: &init_data); |
1165 | if (rc) |
1166 | return rc; |
1167 | |
1168 | return qed_spq_post(p_hwfn, p_ent, NULL); |
1169 | } |
1170 | |
1171 | static int |
1172 | qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, |
1173 | struct qed_ll2_info *p_ll2_info) |
1174 | { |
1175 | struct qed_chain_init_params params = { |
1176 | .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
1177 | .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
1178 | .num_elems = p_ll2_info->input.rx_num_desc, |
1179 | }; |
1180 | struct qed_dev *cdev = p_hwfn->cdev; |
1181 | struct qed_ll2_rx_packet *p_descq; |
1182 | u32 capacity; |
1183 | int rc = 0; |
1184 | |
1185 | if (!p_ll2_info->input.rx_num_desc) |
1186 | goto out; |
1187 | |
1188 | params.mode = QED_CHAIN_MODE_NEXT_PTR; |
1189 | params.elem_size = sizeof(struct core_rx_bd); |
1190 | |
1191 | rc = qed_chain_alloc(cdev, chain: &p_ll2_info->rx_queue.rxq_chain, params: ¶ms); |
1192 | if (rc) { |
1193 | DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n" ); |
1194 | goto out; |
1195 | } |
1196 | |
1197 | capacity = qed_chain_get_capacity(p_chain: &p_ll2_info->rx_queue.rxq_chain); |
1198 | p_descq = kcalloc(n: capacity, size: sizeof(struct qed_ll2_rx_packet), |
1199 | GFP_KERNEL); |
1200 | if (!p_descq) { |
1201 | rc = -ENOMEM; |
1202 | DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n" ); |
1203 | goto out; |
1204 | } |
1205 | p_ll2_info->rx_queue.descq_array = p_descq; |
1206 | |
1207 | params.mode = QED_CHAIN_MODE_PBL; |
1208 | params.elem_size = sizeof(struct core_rx_fast_path_cqe); |
1209 | |
1210 | rc = qed_chain_alloc(cdev, chain: &p_ll2_info->rx_queue.rcq_chain, params: ¶ms); |
1211 | if (rc) { |
1212 | DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n" ); |
1213 | goto out; |
1214 | } |
1215 | |
1216 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, |
1217 | "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n" , |
1218 | p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc); |
1219 | |
1220 | out: |
1221 | return rc; |
1222 | } |
1223 | |
1224 | static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, |
1225 | struct qed_ll2_info *p_ll2_info) |
1226 | { |
1227 | struct qed_chain_init_params params = { |
1228 | .mode = QED_CHAIN_MODE_PBL, |
1229 | .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
1230 | .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
1231 | .num_elems = p_ll2_info->input.tx_num_desc, |
1232 | .elem_size = sizeof(struct core_tx_bd), |
1233 | }; |
1234 | struct qed_ll2_tx_packet *p_descq; |
1235 | size_t desc_size; |
1236 | u32 capacity; |
1237 | int rc = 0; |
1238 | |
1239 | if (!p_ll2_info->input.tx_num_desc) |
1240 | goto out; |
1241 | |
1242 | rc = qed_chain_alloc(cdev: p_hwfn->cdev, chain: &p_ll2_info->tx_queue.txq_chain, |
1243 | params: ¶ms); |
1244 | if (rc) |
1245 | goto out; |
1246 | |
1247 | capacity = qed_chain_get_capacity(p_chain: &p_ll2_info->tx_queue.txq_chain); |
1248 | /* All bds_set elements are flexibily added. */ |
1249 | desc_size = struct_size(p_descq, bds_set, |
1250 | p_ll2_info->input.tx_max_bds_per_packet); |
1251 | |
1252 | p_descq = kcalloc(n: capacity, size: desc_size, GFP_KERNEL); |
1253 | if (!p_descq) { |
1254 | rc = -ENOMEM; |
1255 | goto out; |
1256 | } |
1257 | p_ll2_info->tx_queue.descq_mem = p_descq; |
1258 | |
1259 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, |
1260 | "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n" , |
1261 | p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc); |
1262 | |
1263 | out: |
1264 | if (rc) |
1265 | DP_NOTICE(p_hwfn, |
1266 | "Can't allocate memory for Tx LL2 with 0x%08x buffers\n" , |
1267 | p_ll2_info->input.tx_num_desc); |
1268 | return rc; |
1269 | } |
1270 | |
1271 | static int |
1272 | qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, |
1273 | struct qed_ll2_info *p_ll2_info, u16 mtu) |
1274 | { |
1275 | struct qed_ooo_buffer *p_buf = NULL; |
1276 | void *p_virt; |
1277 | u16 buf_idx; |
1278 | int rc = 0; |
1279 | |
1280 | if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO) |
1281 | return rc; |
1282 | |
1283 | /* Correct number of requested OOO buffers if needed */ |
1284 | if (!p_ll2_info->input.rx_num_ooo_buffers) { |
1285 | u16 num_desc = p_ll2_info->input.rx_num_desc; |
1286 | |
1287 | if (!num_desc) |
1288 | return -EINVAL; |
1289 | p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; |
1290 | } |
1291 | |
1292 | for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; |
1293 | buf_idx++) { |
1294 | p_buf = kzalloc(size: sizeof(*p_buf), GFP_KERNEL); |
1295 | if (!p_buf) { |
1296 | rc = -ENOMEM; |
1297 | goto out; |
1298 | } |
1299 | |
1300 | p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; |
1301 | p_buf->rx_buffer_size = (p_buf->rx_buffer_size + |
1302 | ETH_CACHE_LINE_SIZE - 1) & |
1303 | ~(ETH_CACHE_LINE_SIZE - 1); |
1304 | p_virt = dma_alloc_coherent(dev: &p_hwfn->cdev->pdev->dev, |
1305 | size: p_buf->rx_buffer_size, |
1306 | dma_handle: &p_buf->rx_buffer_phys_addr, |
1307 | GFP_KERNEL); |
1308 | if (!p_virt) { |
1309 | kfree(objp: p_buf); |
1310 | rc = -ENOMEM; |
1311 | goto out; |
1312 | } |
1313 | |
1314 | p_buf->rx_buffer_virt_addr = p_virt; |
1315 | qed_ooo_put_free_buffer(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info, p_buffer: p_buf); |
1316 | } |
1317 | |
1318 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, |
1319 | "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n" , |
1320 | p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size); |
1321 | |
1322 | out: |
1323 | return rc; |
1324 | } |
1325 | |
1326 | static int |
1327 | qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) |
1328 | { |
1329 | if (!cbs || (!cbs->rx_comp_cb || |
1330 | !cbs->rx_release_cb || |
1331 | !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie)) |
1332 | return -EINVAL; |
1333 | |
1334 | p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; |
1335 | p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; |
1336 | p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; |
1337 | p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; |
1338 | p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; |
1339 | p_ll2_info->cbs.cookie = cbs->cookie; |
1340 | |
1341 | return 0; |
1342 | } |
1343 | |
1344 | static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn, |
1345 | struct qed_ll2_acquire_data *data, |
1346 | u8 *start_idx, u8 *last_idx) |
1347 | { |
1348 | /* LL2 queues handles will be split as follows: |
1349 | * First will be the legacy queues, and then the ctx based. |
1350 | */ |
1351 | if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { |
1352 | *start_idx = QED_LL2_LEGACY_CONN_BASE_PF; |
1353 | *last_idx = *start_idx + |
1354 | QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF; |
1355 | } else { |
1356 | /* QED_LL2_RX_TYPE_CTX */ |
1357 | *start_idx = QED_LL2_CTX_CONN_BASE_PF; |
1358 | *last_idx = *start_idx + |
1359 | QED_MAX_NUM_OF_CTX_LL2_CONNS_PF; |
1360 | } |
1361 | } |
1362 | |
1363 | static enum core_error_handle |
1364 | qed_ll2_get_error_choice(enum qed_ll2_error_handle err) |
1365 | { |
1366 | switch (err) { |
1367 | case QED_LL2_DROP_PACKET: |
1368 | return LL2_DROP_PACKET; |
1369 | case QED_LL2_DO_NOTHING: |
1370 | return LL2_DO_NOTHING; |
1371 | case QED_LL2_ASSERT: |
1372 | return LL2_ASSERT; |
1373 | default: |
1374 | return LL2_DO_NOTHING; |
1375 | } |
1376 | } |
1377 | |
1378 | int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) |
1379 | { |
1380 | struct qed_hwfn *p_hwfn = cxt; |
1381 | qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; |
1382 | struct qed_ll2_info *p_ll2_info = NULL; |
1383 | u8 i, first_idx, last_idx, *p_tx_max; |
1384 | int rc; |
1385 | |
1386 | if (!data->p_connection_handle || !p_hwfn->p_ll2_info) |
1387 | return -EINVAL; |
1388 | |
1389 | _qed_ll2_calc_allowed_conns(p_hwfn, data, start_idx: &first_idx, last_idx: &last_idx); |
1390 | |
1391 | /* Find a free connection to be used */ |
1392 | for (i = first_idx; i < last_idx; i++) { |
1393 | mutex_lock(&p_hwfn->p_ll2_info[i].mutex); |
1394 | if (p_hwfn->p_ll2_info[i].b_active) { |
1395 | mutex_unlock(lock: &p_hwfn->p_ll2_info[i].mutex); |
1396 | continue; |
1397 | } |
1398 | |
1399 | p_hwfn->p_ll2_info[i].b_active = true; |
1400 | p_ll2_info = &p_hwfn->p_ll2_info[i]; |
1401 | mutex_unlock(lock: &p_hwfn->p_ll2_info[i].mutex); |
1402 | break; |
1403 | } |
1404 | if (!p_ll2_info) |
1405 | return -EBUSY; |
1406 | |
1407 | memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); |
1408 | |
1409 | switch (data->input.tx_dest) { |
1410 | case QED_LL2_TX_DEST_NW: |
1411 | p_ll2_info->tx_dest = CORE_TX_DEST_NW; |
1412 | break; |
1413 | case QED_LL2_TX_DEST_LB: |
1414 | p_ll2_info->tx_dest = CORE_TX_DEST_LB; |
1415 | break; |
1416 | case QED_LL2_TX_DEST_DROP: |
1417 | p_ll2_info->tx_dest = CORE_TX_DEST_DROP; |
1418 | break; |
1419 | default: |
1420 | return -EINVAL; |
1421 | } |
1422 | |
1423 | if (data->input.conn_type == QED_LL2_TYPE_OOO || |
1424 | data->input.secondary_queue) |
1425 | p_ll2_info->main_func_queue = false; |
1426 | else |
1427 | p_ll2_info->main_func_queue = true; |
1428 | |
1429 | /* Correct maximum number of Tx BDs */ |
1430 | p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; |
1431 | if (*p_tx_max == 0) |
1432 | *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; |
1433 | else |
1434 | *p_tx_max = min_t(u8, *p_tx_max, |
1435 | CORE_LL2_TX_MAX_BDS_PER_PACKET); |
1436 | |
1437 | rc = qed_ll2_set_cbs(p_ll2_info, cbs: data->cbs); |
1438 | if (rc) { |
1439 | DP_NOTICE(p_hwfn, "Invalid callback functions\n" ); |
1440 | goto q_allocate_fail; |
1441 | } |
1442 | |
1443 | rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); |
1444 | if (rc) |
1445 | goto q_allocate_fail; |
1446 | |
1447 | rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); |
1448 | if (rc) |
1449 | goto q_allocate_fail; |
1450 | |
1451 | rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, |
1452 | mtu: data->input.mtu); |
1453 | if (rc) |
1454 | goto q_allocate_fail; |
1455 | |
1456 | /* Register callbacks for the Rx/Tx queues */ |
1457 | if (data->input.conn_type == QED_LL2_TYPE_OOO) { |
1458 | comp_rx_cb = qed_ll2_lb_rxq_completion; |
1459 | comp_tx_cb = qed_ll2_lb_txq_completion; |
1460 | } else { |
1461 | comp_rx_cb = qed_ll2_rxq_completion; |
1462 | comp_tx_cb = qed_ll2_txq_completion; |
1463 | } |
1464 | |
1465 | if (data->input.rx_num_desc) { |
1466 | qed_int_register_cb(p_hwfn, comp_cb: comp_rx_cb, |
1467 | cookie: &p_hwfn->p_ll2_info[i], |
1468 | sb_idx: &p_ll2_info->rx_queue.rx_sb_index, |
1469 | p_fw_cons: &p_ll2_info->rx_queue.p_fw_cons); |
1470 | p_ll2_info->rx_queue.b_cb_registered = true; |
1471 | } |
1472 | |
1473 | if (data->input.tx_num_desc) { |
1474 | qed_int_register_cb(p_hwfn, |
1475 | comp_cb: comp_tx_cb, |
1476 | cookie: &p_hwfn->p_ll2_info[i], |
1477 | sb_idx: &p_ll2_info->tx_queue.tx_sb_index, |
1478 | p_fw_cons: &p_ll2_info->tx_queue.p_fw_cons); |
1479 | p_ll2_info->tx_queue.b_cb_registered = true; |
1480 | } |
1481 | |
1482 | *data->p_connection_handle = i; |
1483 | return rc; |
1484 | |
1485 | q_allocate_fail: |
1486 | qed_ll2_release_connection(cxt: p_hwfn, connection_handle: i); |
1487 | return -ENOMEM; |
1488 | } |
1489 | |
1490 | static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, |
1491 | struct qed_ll2_info *p_ll2_conn) |
1492 | { |
1493 | enum qed_ll2_error_handle error_input; |
1494 | enum core_error_handle error_mode; |
1495 | u8 action_on_error = 0; |
1496 | int rc; |
1497 | |
1498 | if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) |
1499 | return 0; |
1500 | |
1501 | DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); |
1502 | error_input = p_ll2_conn->input.ai_err_packet_too_big; |
1503 | error_mode = qed_ll2_get_error_choice(err: error_input); |
1504 | SET_FIELD(action_on_error, |
1505 | CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); |
1506 | error_input = p_ll2_conn->input.ai_err_no_buf; |
1507 | error_mode = qed_ll2_get_error_choice(err: error_input); |
1508 | SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); |
1509 | |
1510 | rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); |
1511 | if (rc) |
1512 | return rc; |
1513 | |
1514 | if (p_ll2_conn->rx_queue.ctx_based) { |
1515 | rc = qed_db_recovery_add(cdev: p_hwfn->cdev, |
1516 | db_addr: p_ll2_conn->rx_queue.set_prod_addr, |
1517 | db_data: &p_ll2_conn->rx_queue.db_data, |
1518 | db_width: DB_REC_WIDTH_64B, db_space: DB_REC_KERNEL); |
1519 | } |
1520 | |
1521 | return rc; |
1522 | } |
1523 | |
1524 | static void |
1525 | qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, |
1526 | struct qed_ll2_info *p_ll2_conn) |
1527 | { |
1528 | if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) |
1529 | return; |
1530 | |
1531 | qed_ooo_release_all_isles(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info); |
1532 | qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); |
1533 | } |
1534 | |
1535 | static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, |
1536 | u8 handle, |
1537 | u8 ll2_queue_type) |
1538 | { |
1539 | u8 qid; |
1540 | |
1541 | if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) |
1542 | return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle; |
1543 | |
1544 | /* QED_LL2_RX_TYPE_CTX |
1545 | * FW distinguishes between the legacy queues (ram based) and the |
1546 | * ctx based queues by the queue_id. |
1547 | * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy |
1548 | * and the queue ids above that are ctx base. |
1549 | */ |
1550 | qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] + |
1551 | MAX_NUM_LL2_RX_RAM_QUEUES; |
1552 | |
1553 | /* See comment on the acquire connection for how the ll2 |
1554 | * queues handles are divided. |
1555 | */ |
1556 | qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF); |
1557 | |
1558 | return qid; |
1559 | } |
1560 | |
1561 | int qed_ll2_establish_connection(void *cxt, u8 connection_handle) |
1562 | { |
1563 | struct core_conn_context *p_cxt; |
1564 | struct qed_ll2_tx_packet *p_pkt; |
1565 | struct qed_ll2_info *p_ll2_conn; |
1566 | struct qed_hwfn *p_hwfn = cxt; |
1567 | struct qed_ll2_rx_queue *p_rx; |
1568 | struct qed_ll2_tx_queue *p_tx; |
1569 | struct qed_cxt_info cxt_info; |
1570 | struct qed_ptt *p_ptt; |
1571 | int rc = -EINVAL; |
1572 | u32 i, capacity; |
1573 | size_t desc_size; |
1574 | u8 qid, stats_id; |
1575 | |
1576 | p_ptt = qed_ptt_acquire(p_hwfn); |
1577 | if (!p_ptt) |
1578 | return -EAGAIN; |
1579 | |
1580 | p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); |
1581 | if (!p_ll2_conn) { |
1582 | rc = -EINVAL; |
1583 | goto out; |
1584 | } |
1585 | |
1586 | p_rx = &p_ll2_conn->rx_queue; |
1587 | p_tx = &p_ll2_conn->tx_queue; |
1588 | |
1589 | qed_chain_reset(p_chain: &p_rx->rxq_chain); |
1590 | qed_chain_reset(p_chain: &p_rx->rcq_chain); |
1591 | INIT_LIST_HEAD(list: &p_rx->active_descq); |
1592 | INIT_LIST_HEAD(list: &p_rx->free_descq); |
1593 | INIT_LIST_HEAD(list: &p_rx->posting_descq); |
1594 | spin_lock_init(&p_rx->lock); |
1595 | capacity = qed_chain_get_capacity(p_chain: &p_rx->rxq_chain); |
1596 | for (i = 0; i < capacity; i++) |
1597 | list_add_tail(new: &p_rx->descq_array[i].list_entry, |
1598 | head: &p_rx->free_descq); |
1599 | *p_rx->p_fw_cons = 0; |
1600 | |
1601 | qed_chain_reset(p_chain: &p_tx->txq_chain); |
1602 | INIT_LIST_HEAD(list: &p_tx->active_descq); |
1603 | INIT_LIST_HEAD(list: &p_tx->free_descq); |
1604 | INIT_LIST_HEAD(list: &p_tx->sending_descq); |
1605 | spin_lock_init(&p_tx->lock); |
1606 | capacity = qed_chain_get_capacity(p_chain: &p_tx->txq_chain); |
1607 | /* All bds_set elements are flexibily added. */ |
1608 | desc_size = struct_size(p_pkt, bds_set, |
1609 | p_ll2_conn->input.tx_max_bds_per_packet); |
1610 | |
1611 | for (i = 0; i < capacity; i++) { |
1612 | p_pkt = p_tx->descq_mem + desc_size * i; |
1613 | list_add_tail(new: &p_pkt->list_entry, head: &p_tx->free_descq); |
1614 | } |
1615 | p_tx->cur_completing_bd_idx = 0; |
1616 | p_tx->bds_idx = 0; |
1617 | p_tx->b_completing_packet = false; |
1618 | p_tx->cur_send_packet = NULL; |
1619 | p_tx->cur_send_frag_num = 0; |
1620 | p_tx->cur_completing_frag_num = 0; |
1621 | *p_tx->p_fw_cons = 0; |
1622 | |
1623 | rc = qed_cxt_acquire_cid(p_hwfn, type: PROTOCOLID_CORE, p_cid: &p_ll2_conn->cid); |
1624 | if (rc) |
1625 | goto out; |
1626 | cxt_info.iid = p_ll2_conn->cid; |
1627 | rc = qed_cxt_get_cid_info(p_hwfn, p_info: &cxt_info); |
1628 | if (rc) { |
1629 | DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n" , |
1630 | p_ll2_conn->cid); |
1631 | goto out; |
1632 | } |
1633 | |
1634 | p_cxt = cxt_info.p_cxt; |
1635 | |
1636 | memset(p_cxt, 0, sizeof(*p_cxt)); |
1637 | |
1638 | qid = qed_ll2_handle_to_queue_id(p_hwfn, handle: connection_handle, |
1639 | ll2_queue_type: p_ll2_conn->input.rx_conn_type); |
1640 | stats_id = qed_ll2_handle_to_stats_id(p_hwfn, |
1641 | ll2_queue_type: p_ll2_conn->input.rx_conn_type, |
1642 | qid); |
1643 | p_ll2_conn->queue_id = qid; |
1644 | p_ll2_conn->tx_stats_id = stats_id; |
1645 | |
1646 | /* If there is no valid stats id for this connection, disable stats */ |
1647 | if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) { |
1648 | p_ll2_conn->tx_stats_en = 0; |
1649 | DP_VERBOSE(p_hwfn, |
1650 | QED_MSG_LL2, |
1651 | "Disabling stats for queue %d - not enough counters\n" , |
1652 | qid); |
1653 | } |
1654 | |
1655 | DP_VERBOSE(p_hwfn, |
1656 | QED_MSG_LL2, |
1657 | "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n" , |
1658 | p_hwfn->rel_pf_id, |
1659 | p_ll2_conn->input.rx_conn_type, qid, stats_id); |
1660 | |
1661 | if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { |
1662 | p_rx->set_prod_addr = |
1663 | (u8 __iomem *)p_hwfn->regview + |
1664 | GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, |
1665 | TSTORM_LL2_RX_PRODS, qid); |
1666 | } else { |
1667 | /* QED_LL2_RX_TYPE_CTX - using doorbell */ |
1668 | p_rx->ctx_based = 1; |
1669 | |
1670 | p_rx->set_prod_addr = p_hwfn->doorbells + |
1671 | p_hwfn->dpi_start_offset + |
1672 | DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE); |
1673 | |
1674 | /* prepare db data */ |
1675 | p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid); |
1676 | SET_FIELD(p_rx->db_data.params, |
1677 | CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET); |
1678 | SET_FIELD(p_rx->db_data.params, |
1679 | CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0); |
1680 | } |
1681 | |
1682 | p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + |
1683 | qed_db_addr(cid: p_ll2_conn->cid, |
1684 | DQ_DEMS_LEGACY); |
1685 | /* prepare db data */ |
1686 | SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); |
1687 | SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); |
1688 | SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, |
1689 | DQ_XCM_CORE_TX_BD_PROD_CMD); |
1690 | p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; |
1691 | |
1692 | rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); |
1693 | if (rc) |
1694 | goto out; |
1695 | |
1696 | rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); |
1697 | if (rc) |
1698 | goto out; |
1699 | |
1700 | if (!QED_IS_RDMA_PERSONALITY(p_hwfn) && |
1701 | !QED_IS_NVMETCP_PERSONALITY(p_hwfn)) |
1702 | qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, val: 1); |
1703 | |
1704 | qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); |
1705 | |
1706 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { |
1707 | if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) |
1708 | qed_llh_add_protocol_filter(cdev: p_hwfn->cdev, ppfid: 0, |
1709 | type: QED_LLH_FILTER_ETHERTYPE, |
1710 | ETH_P_FCOE, dest_port: 0); |
1711 | qed_llh_add_protocol_filter(cdev: p_hwfn->cdev, ppfid: 0, |
1712 | type: QED_LLH_FILTER_ETHERTYPE, |
1713 | ETH_P_FIP, dest_port: 0); |
1714 | } |
1715 | |
1716 | out: |
1717 | qed_ptt_release(p_hwfn, p_ptt); |
1718 | return rc; |
1719 | } |
1720 | |
1721 | static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, |
1722 | struct qed_ll2_rx_queue *p_rx, |
1723 | struct qed_ll2_rx_packet *p_curp) |
1724 | { |
1725 | struct qed_ll2_rx_packet *p_posting_packet = NULL; |
1726 | struct core_ll2_rx_prod rx_prod = { 0, 0 }; |
1727 | bool b_notify_fw = false; |
1728 | u16 bd_prod, cq_prod; |
1729 | |
1730 | /* This handles the flushing of already posted buffers */ |
1731 | while (!list_empty(head: &p_rx->posting_descq)) { |
1732 | p_posting_packet = list_first_entry(&p_rx->posting_descq, |
1733 | struct qed_ll2_rx_packet, |
1734 | list_entry); |
1735 | list_move_tail(list: &p_posting_packet->list_entry, |
1736 | head: &p_rx->active_descq); |
1737 | b_notify_fw = true; |
1738 | } |
1739 | |
1740 | /* This handles the supplied packet [if there is one] */ |
1741 | if (p_curp) { |
1742 | list_add_tail(new: &p_curp->list_entry, head: &p_rx->active_descq); |
1743 | b_notify_fw = true; |
1744 | } |
1745 | |
1746 | if (!b_notify_fw) |
1747 | return; |
1748 | |
1749 | bd_prod = qed_chain_get_prod_idx(chain: &p_rx->rxq_chain); |
1750 | cq_prod = qed_chain_get_prod_idx(chain: &p_rx->rcq_chain); |
1751 | if (p_rx->ctx_based) { |
1752 | /* update producer by giving a doorbell */ |
1753 | p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod); |
1754 | p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod); |
1755 | /* Make sure chain element is updated before ringing the |
1756 | * doorbell |
1757 | */ |
1758 | dma_wmb(); |
1759 | DIRECT_REG_WR64(p_rx->set_prod_addr, |
1760 | *((u64 *)&p_rx->db_data)); |
1761 | } else { |
1762 | rx_prod.bd_prod = cpu_to_le16(bd_prod); |
1763 | rx_prod.cqe_prod = cpu_to_le16(cq_prod); |
1764 | |
1765 | /* Make sure chain element is updated before ringing the |
1766 | * doorbell |
1767 | */ |
1768 | dma_wmb(); |
1769 | |
1770 | DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); |
1771 | } |
1772 | } |
1773 | |
1774 | int qed_ll2_post_rx_buffer(void *cxt, |
1775 | u8 connection_handle, |
1776 | dma_addr_t addr, |
1777 | u16 buf_len, void *cookie, u8 notify_fw) |
1778 | { |
1779 | struct qed_hwfn *p_hwfn = cxt; |
1780 | struct core_rx_bd_with_buff_len *p_curb = NULL; |
1781 | struct qed_ll2_rx_packet *p_curp = NULL; |
1782 | struct qed_ll2_info *p_ll2_conn; |
1783 | struct qed_ll2_rx_queue *p_rx; |
1784 | unsigned long flags; |
1785 | void *p_data; |
1786 | int rc = 0; |
1787 | |
1788 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); |
1789 | if (!p_ll2_conn) |
1790 | return -EINVAL; |
1791 | p_rx = &p_ll2_conn->rx_queue; |
1792 | if (!p_rx->set_prod_addr) |
1793 | return -EIO; |
1794 | |
1795 | spin_lock_irqsave(&p_rx->lock, flags); |
1796 | if (!list_empty(head: &p_rx->free_descq)) |
1797 | p_curp = list_first_entry(&p_rx->free_descq, |
1798 | struct qed_ll2_rx_packet, list_entry); |
1799 | if (p_curp) { |
1800 | if (qed_chain_get_elem_left(chain: &p_rx->rxq_chain) && |
1801 | qed_chain_get_elem_left(chain: &p_rx->rcq_chain)) { |
1802 | p_data = qed_chain_produce(p_chain: &p_rx->rxq_chain); |
1803 | p_curb = (struct core_rx_bd_with_buff_len *)p_data; |
1804 | qed_chain_produce(p_chain: &p_rx->rcq_chain); |
1805 | } |
1806 | } |
1807 | |
1808 | /* If we're lacking entries, let's try to flush buffers to FW */ |
1809 | if (!p_curp || !p_curb) { |
1810 | rc = -EBUSY; |
1811 | p_curp = NULL; |
1812 | goto out_notify; |
1813 | } |
1814 | |
1815 | /* We have an Rx packet we can fill */ |
1816 | DMA_REGPAIR_LE(p_curb->addr, addr); |
1817 | p_curb->buff_length = cpu_to_le16(buf_len); |
1818 | p_curp->rx_buf_addr = addr; |
1819 | p_curp->cookie = cookie; |
1820 | p_curp->rxq_bd = p_curb; |
1821 | p_curp->buf_length = buf_len; |
1822 | list_del(entry: &p_curp->list_entry); |
1823 | |
1824 | /* Check if we only want to enqueue this packet without informing FW */ |
1825 | if (!notify_fw) { |
1826 | list_add_tail(new: &p_curp->list_entry, head: &p_rx->posting_descq); |
1827 | goto out; |
1828 | } |
1829 | |
1830 | out_notify: |
1831 | qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp); |
1832 | out: |
1833 | spin_unlock_irqrestore(lock: &p_rx->lock, flags); |
1834 | return rc; |
1835 | } |
1836 | |
1837 | static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, |
1838 | struct qed_ll2_tx_queue *p_tx, |
1839 | struct qed_ll2_tx_packet *p_curp, |
1840 | struct qed_ll2_tx_pkt_info *pkt, |
1841 | u8 notify_fw) |
1842 | { |
1843 | list_del(entry: &p_curp->list_entry); |
1844 | p_curp->cookie = pkt->cookie; |
1845 | p_curp->bd_used = pkt->num_of_bds; |
1846 | p_curp->notify_fw = notify_fw; |
1847 | p_tx->cur_send_packet = p_curp; |
1848 | p_tx->cur_send_frag_num = 0; |
1849 | |
1850 | p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; |
1851 | p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; |
1852 | p_tx->cur_send_frag_num++; |
1853 | } |
1854 | |
1855 | static void |
1856 | qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, |
1857 | struct qed_ll2_info *p_ll2, |
1858 | struct qed_ll2_tx_packet *p_curp, |
1859 | struct qed_ll2_tx_pkt_info *pkt) |
1860 | { |
1861 | struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; |
1862 | u16 prod_idx = qed_chain_get_prod_idx(chain: p_tx_chain); |
1863 | struct core_tx_bd *start_bd = NULL; |
1864 | enum core_roce_flavor_type roce_flavor; |
1865 | enum core_tx_dest tx_dest; |
1866 | u16 bd_data = 0, frag_idx; |
1867 | u16 bitfield1; |
1868 | |
1869 | roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE |
1870 | : CORE_RROCE; |
1871 | |
1872 | switch (pkt->tx_dest) { |
1873 | case QED_LL2_TX_DEST_NW: |
1874 | tx_dest = CORE_TX_DEST_NW; |
1875 | break; |
1876 | case QED_LL2_TX_DEST_LB: |
1877 | tx_dest = CORE_TX_DEST_LB; |
1878 | break; |
1879 | case QED_LL2_TX_DEST_DROP: |
1880 | tx_dest = CORE_TX_DEST_DROP; |
1881 | break; |
1882 | default: |
1883 | tx_dest = CORE_TX_DEST_LB; |
1884 | break; |
1885 | } |
1886 | |
1887 | start_bd = (struct core_tx_bd *)qed_chain_produce(p_chain: p_tx_chain); |
1888 | if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) && |
1889 | p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) { |
1890 | start_bd->nw_vlan_or_lb_echo = |
1891 | cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); |
1892 | } else { |
1893 | start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); |
1894 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && |
1895 | p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) |
1896 | pkt->remove_stag = true; |
1897 | } |
1898 | |
1899 | bitfield1 = le16_to_cpu(start_bd->bitfield1); |
1900 | SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); |
1901 | SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); |
1902 | start_bd->bitfield1 = cpu_to_le16(bitfield1); |
1903 | |
1904 | bd_data |= pkt->bd_flags; |
1905 | SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); |
1906 | SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); |
1907 | SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); |
1908 | SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); |
1909 | SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); |
1910 | SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); |
1911 | SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, |
1912 | !!(pkt->remove_stag)); |
1913 | |
1914 | start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); |
1915 | DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); |
1916 | start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); |
1917 | |
1918 | DP_VERBOSE(p_hwfn, |
1919 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), |
1920 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n" , |
1921 | p_ll2->queue_id, |
1922 | p_ll2->cid, |
1923 | p_ll2->input.conn_type, |
1924 | prod_idx, |
1925 | pkt->first_frag_len, |
1926 | pkt->num_of_bds, |
1927 | le32_to_cpu(start_bd->addr.hi), |
1928 | le32_to_cpu(start_bd->addr.lo)); |
1929 | |
1930 | if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) |
1931 | return; |
1932 | |
1933 | /* Need to provide the packet with additional BDs for frags */ |
1934 | for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; |
1935 | frag_idx < pkt->num_of_bds; frag_idx++) { |
1936 | struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; |
1937 | |
1938 | *p_bd = (struct core_tx_bd *)qed_chain_produce(p_chain: p_tx_chain); |
1939 | (*p_bd)->bd_data.as_bitfield = 0; |
1940 | (*p_bd)->bitfield1 = 0; |
1941 | p_curp->bds_set[frag_idx].tx_frag = 0; |
1942 | p_curp->bds_set[frag_idx].frag_len = 0; |
1943 | } |
1944 | } |
1945 | |
1946 | /* This should be called while the Txq spinlock is being held */ |
1947 | static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, |
1948 | struct qed_ll2_info *p_ll2_conn) |
1949 | { |
1950 | bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; |
1951 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; |
1952 | struct qed_ll2_tx_packet *p_pkt = NULL; |
1953 | u16 bd_prod; |
1954 | |
1955 | /* If there are missing BDs, don't do anything now */ |
1956 | if (p_ll2_conn->tx_queue.cur_send_frag_num != |
1957 | p_ll2_conn->tx_queue.cur_send_packet->bd_used) |
1958 | return; |
1959 | |
1960 | /* Push the current packet to the list and clean after it */ |
1961 | list_add_tail(new: &p_ll2_conn->tx_queue.cur_send_packet->list_entry, |
1962 | head: &p_ll2_conn->tx_queue.sending_descq); |
1963 | p_ll2_conn->tx_queue.cur_send_packet = NULL; |
1964 | p_ll2_conn->tx_queue.cur_send_frag_num = 0; |
1965 | |
1966 | /* Notify FW of packet only if requested to */ |
1967 | if (!b_notify) |
1968 | return; |
1969 | |
1970 | bd_prod = qed_chain_get_prod_idx(chain: &p_ll2_conn->tx_queue.txq_chain); |
1971 | |
1972 | while (!list_empty(head: &p_tx->sending_descq)) { |
1973 | p_pkt = list_first_entry(&p_tx->sending_descq, |
1974 | struct qed_ll2_tx_packet, list_entry); |
1975 | if (!p_pkt) |
1976 | break; |
1977 | |
1978 | list_move_tail(list: &p_pkt->list_entry, head: &p_tx->active_descq); |
1979 | } |
1980 | |
1981 | p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); |
1982 | |
1983 | /* Make sure the BDs data is updated before ringing the doorbell */ |
1984 | wmb(); |
1985 | |
1986 | DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); |
1987 | |
1988 | DP_VERBOSE(p_hwfn, |
1989 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), |
1990 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n" , |
1991 | p_ll2_conn->queue_id, |
1992 | p_ll2_conn->cid, |
1993 | p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); |
1994 | } |
1995 | |
1996 | int qed_ll2_prepare_tx_packet(void *cxt, |
1997 | u8 connection_handle, |
1998 | struct qed_ll2_tx_pkt_info *pkt, |
1999 | bool notify_fw) |
2000 | { |
2001 | struct qed_hwfn *p_hwfn = cxt; |
2002 | struct qed_ll2_tx_packet *p_curp = NULL; |
2003 | struct qed_ll2_info *p_ll2_conn = NULL; |
2004 | struct qed_ll2_tx_queue *p_tx; |
2005 | struct qed_chain *p_tx_chain; |
2006 | unsigned long flags; |
2007 | int rc = 0; |
2008 | |
2009 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); |
2010 | if (unlikely(!p_ll2_conn)) |
2011 | return -EINVAL; |
2012 | p_tx = &p_ll2_conn->tx_queue; |
2013 | p_tx_chain = &p_tx->txq_chain; |
2014 | |
2015 | if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)) |
2016 | return -EIO; |
2017 | |
2018 | spin_lock_irqsave(&p_tx->lock, flags); |
2019 | if (unlikely(p_tx->cur_send_packet)) { |
2020 | rc = -EEXIST; |
2021 | goto out; |
2022 | } |
2023 | |
2024 | /* Get entry, but only if we have tx elements for it */ |
2025 | if (unlikely(!list_empty(&p_tx->free_descq))) |
2026 | p_curp = list_first_entry(&p_tx->free_descq, |
2027 | struct qed_ll2_tx_packet, list_entry); |
2028 | if (unlikely(p_curp && |
2029 | qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)) |
2030 | p_curp = NULL; |
2031 | |
2032 | if (unlikely(!p_curp)) { |
2033 | rc = -EBUSY; |
2034 | goto out; |
2035 | } |
2036 | |
2037 | /* Prepare packet and BD, and perhaps send a doorbell to FW */ |
2038 | qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw); |
2039 | |
2040 | qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2: p_ll2_conn, p_curp, pkt); |
2041 | |
2042 | qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); |
2043 | |
2044 | out: |
2045 | spin_unlock_irqrestore(lock: &p_tx->lock, flags); |
2046 | return rc; |
2047 | } |
2048 | |
2049 | int qed_ll2_set_fragment_of_tx_packet(void *cxt, |
2050 | u8 connection_handle, |
2051 | dma_addr_t addr, u16 nbytes) |
2052 | { |
2053 | struct qed_ll2_tx_packet *p_cur_send_packet = NULL; |
2054 | struct qed_hwfn *p_hwfn = cxt; |
2055 | struct qed_ll2_info *p_ll2_conn = NULL; |
2056 | u16 cur_send_frag_num = 0; |
2057 | struct core_tx_bd *p_bd; |
2058 | unsigned long flags; |
2059 | |
2060 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); |
2061 | if (unlikely(!p_ll2_conn)) |
2062 | return -EINVAL; |
2063 | |
2064 | if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet)) |
2065 | return -EINVAL; |
2066 | |
2067 | p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; |
2068 | cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; |
2069 | |
2070 | if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used)) |
2071 | return -EINVAL; |
2072 | |
2073 | /* Fill the BD information, and possibly notify FW */ |
2074 | p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd; |
2075 | DMA_REGPAIR_LE(p_bd->addr, addr); |
2076 | p_bd->nbytes = cpu_to_le16(nbytes); |
2077 | p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr; |
2078 | p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes; |
2079 | |
2080 | p_ll2_conn->tx_queue.cur_send_frag_num++; |
2081 | |
2082 | spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags); |
2083 | qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); |
2084 | spin_unlock_irqrestore(lock: &p_ll2_conn->tx_queue.lock, flags); |
2085 | |
2086 | return 0; |
2087 | } |
2088 | |
2089 | int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) |
2090 | { |
2091 | struct qed_hwfn *p_hwfn = cxt; |
2092 | struct qed_ll2_info *p_ll2_conn = NULL; |
2093 | int rc = -EINVAL; |
2094 | struct qed_ptt *p_ptt; |
2095 | |
2096 | p_ptt = qed_ptt_acquire(p_hwfn); |
2097 | if (!p_ptt) |
2098 | return -EAGAIN; |
2099 | |
2100 | p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); |
2101 | if (!p_ll2_conn) { |
2102 | rc = -EINVAL; |
2103 | goto out; |
2104 | } |
2105 | |
2106 | /* Stop Tx & Rx of connection, if needed */ |
2107 | if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { |
2108 | p_ll2_conn->tx_queue.b_cb_registered = false; |
2109 | smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ |
2110 | rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); |
2111 | if (rc) |
2112 | goto out; |
2113 | |
2114 | qed_ll2_txq_flush(p_hwfn, connection_handle); |
2115 | qed_int_unregister_cb(p_hwfn, pi: p_ll2_conn->tx_queue.tx_sb_index); |
2116 | } |
2117 | |
2118 | if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { |
2119 | p_ll2_conn->rx_queue.b_cb_registered = false; |
2120 | smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ |
2121 | |
2122 | if (p_ll2_conn->rx_queue.ctx_based) |
2123 | qed_db_recovery_del(cdev: p_hwfn->cdev, |
2124 | db_addr: p_ll2_conn->rx_queue.set_prod_addr, |
2125 | db_data: &p_ll2_conn->rx_queue.db_data); |
2126 | |
2127 | rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); |
2128 | if (rc) |
2129 | goto out; |
2130 | |
2131 | qed_ll2_rxq_flush(p_hwfn, connection_handle); |
2132 | qed_int_unregister_cb(p_hwfn, pi: p_ll2_conn->rx_queue.rx_sb_index); |
2133 | } |
2134 | |
2135 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) |
2136 | qed_ooo_release_all_isles(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info); |
2137 | |
2138 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { |
2139 | if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) |
2140 | qed_llh_remove_protocol_filter(cdev: p_hwfn->cdev, ppfid: 0, |
2141 | type: QED_LLH_FILTER_ETHERTYPE, |
2142 | ETH_P_FCOE, dest_port: 0); |
2143 | qed_llh_remove_protocol_filter(cdev: p_hwfn->cdev, ppfid: 0, |
2144 | type: QED_LLH_FILTER_ETHERTYPE, |
2145 | ETH_P_FIP, dest_port: 0); |
2146 | } |
2147 | |
2148 | out: |
2149 | qed_ptt_release(p_hwfn, p_ptt); |
2150 | return rc; |
2151 | } |
2152 | |
2153 | static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, |
2154 | struct qed_ll2_info *p_ll2_conn) |
2155 | { |
2156 | struct qed_ooo_buffer *p_buffer; |
2157 | |
2158 | if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) |
2159 | return; |
2160 | |
2161 | qed_ooo_release_all_isles(p_hwfn, p_ooo_info: p_hwfn->p_ooo_info); |
2162 | while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, |
2163 | p_ooo_info: p_hwfn->p_ooo_info))) { |
2164 | dma_free_coherent(dev: &p_hwfn->cdev->pdev->dev, |
2165 | size: p_buffer->rx_buffer_size, |
2166 | cpu_addr: p_buffer->rx_buffer_virt_addr, |
2167 | dma_handle: p_buffer->rx_buffer_phys_addr); |
2168 | kfree(objp: p_buffer); |
2169 | } |
2170 | } |
2171 | |
2172 | void qed_ll2_release_connection(void *cxt, u8 connection_handle) |
2173 | { |
2174 | struct qed_hwfn *p_hwfn = cxt; |
2175 | struct qed_ll2_info *p_ll2_conn = NULL; |
2176 | |
2177 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); |
2178 | if (!p_ll2_conn) |
2179 | return; |
2180 | |
2181 | kfree(objp: p_ll2_conn->tx_queue.descq_mem); |
2182 | qed_chain_free(cdev: p_hwfn->cdev, chain: &p_ll2_conn->tx_queue.txq_chain); |
2183 | |
2184 | kfree(objp: p_ll2_conn->rx_queue.descq_array); |
2185 | qed_chain_free(cdev: p_hwfn->cdev, chain: &p_ll2_conn->rx_queue.rxq_chain); |
2186 | qed_chain_free(cdev: p_hwfn->cdev, chain: &p_ll2_conn->rx_queue.rcq_chain); |
2187 | |
2188 | qed_cxt_release_cid(p_hwfn, cid: p_ll2_conn->cid); |
2189 | |
2190 | qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); |
2191 | |
2192 | mutex_lock(&p_ll2_conn->mutex); |
2193 | p_ll2_conn->b_active = false; |
2194 | mutex_unlock(lock: &p_ll2_conn->mutex); |
2195 | } |
2196 | |
2197 | int qed_ll2_alloc(struct qed_hwfn *p_hwfn) |
2198 | { |
2199 | struct qed_ll2_info *p_ll2_connections; |
2200 | u8 i; |
2201 | |
2202 | /* Allocate LL2's set struct */ |
2203 | p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS, |
2204 | size: sizeof(struct qed_ll2_info), GFP_KERNEL); |
2205 | if (!p_ll2_connections) { |
2206 | DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n" ); |
2207 | return -ENOMEM; |
2208 | } |
2209 | |
2210 | for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) |
2211 | p_ll2_connections[i].my_id = i; |
2212 | |
2213 | p_hwfn->p_ll2_info = p_ll2_connections; |
2214 | return 0; |
2215 | } |
2216 | |
2217 | void qed_ll2_setup(struct qed_hwfn *p_hwfn) |
2218 | { |
2219 | int i; |
2220 | |
2221 | for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) |
2222 | mutex_init(&p_hwfn->p_ll2_info[i].mutex); |
2223 | } |
2224 | |
2225 | void qed_ll2_free(struct qed_hwfn *p_hwfn) |
2226 | { |
2227 | if (!p_hwfn->p_ll2_info) |
2228 | return; |
2229 | |
2230 | kfree(objp: p_hwfn->p_ll2_info); |
2231 | p_hwfn->p_ll2_info = NULL; |
2232 | } |
2233 | |
2234 | static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn, |
2235 | struct qed_ptt *p_ptt, |
2236 | struct qed_ll2_stats *p_stats) |
2237 | { |
2238 | struct core_ll2_port_stats port_stats; |
2239 | |
2240 | memset(&port_stats, 0, sizeof(port_stats)); |
2241 | qed_memcpy_from(p_hwfn, p_ptt, dest: &port_stats, |
2242 | BAR0_MAP_REG_TSDM_RAM + |
2243 | TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)), |
2244 | n: sizeof(port_stats)); |
2245 | |
2246 | p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr); |
2247 | p_stats->gsi_invalid_pkt_length += |
2248 | HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length); |
2249 | p_stats->gsi_unsupported_pkt_typ += |
2250 | HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ); |
2251 | p_stats->gsi_crcchksm_error += |
2252 | HILO_64_REGPAIR(port_stats.gsi_crcchksm_error); |
2253 | } |
2254 | |
2255 | static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, |
2256 | struct qed_ptt *p_ptt, |
2257 | struct qed_ll2_info *p_ll2_conn, |
2258 | struct qed_ll2_stats *p_stats) |
2259 | { |
2260 | struct core_ll2_tstorm_per_queue_stat tstats; |
2261 | u8 qid = p_ll2_conn->queue_id; |
2262 | u32 tstats_addr; |
2263 | |
2264 | memset(&tstats, 0, sizeof(tstats)); |
2265 | tstats_addr = BAR0_MAP_REG_TSDM_RAM + |
2266 | CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid); |
2267 | qed_memcpy_from(p_hwfn, p_ptt, dest: &tstats, hw_addr: tstats_addr, n: sizeof(tstats)); |
2268 | |
2269 | p_stats->packet_too_big_discard += |
2270 | HILO_64_REGPAIR(tstats.packet_too_big_discard); |
2271 | p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard); |
2272 | } |
2273 | |
2274 | static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn, |
2275 | struct qed_ptt *p_ptt, |
2276 | struct qed_ll2_info *p_ll2_conn, |
2277 | struct qed_ll2_stats *p_stats) |
2278 | { |
2279 | struct core_ll2_ustorm_per_queue_stat ustats; |
2280 | u8 qid = p_ll2_conn->queue_id; |
2281 | u32 ustats_addr; |
2282 | |
2283 | memset(&ustats, 0, sizeof(ustats)); |
2284 | ustats_addr = BAR0_MAP_REG_USDM_RAM + |
2285 | CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid); |
2286 | qed_memcpy_from(p_hwfn, p_ptt, dest: &ustats, hw_addr: ustats_addr, n: sizeof(ustats)); |
2287 | |
2288 | p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); |
2289 | p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); |
2290 | p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); |
2291 | p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); |
2292 | p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); |
2293 | p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); |
2294 | } |
2295 | |
2296 | static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, |
2297 | struct qed_ptt *p_ptt, |
2298 | struct qed_ll2_info *p_ll2_conn, |
2299 | struct qed_ll2_stats *p_stats) |
2300 | { |
2301 | struct core_ll2_pstorm_per_queue_stat pstats; |
2302 | u8 stats_id = p_ll2_conn->tx_stats_id; |
2303 | u32 pstats_addr; |
2304 | |
2305 | memset(&pstats, 0, sizeof(pstats)); |
2306 | pstats_addr = BAR0_MAP_REG_PSDM_RAM + |
2307 | CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id); |
2308 | qed_memcpy_from(p_hwfn, p_ptt, dest: &pstats, hw_addr: pstats_addr, n: sizeof(pstats)); |
2309 | |
2310 | p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); |
2311 | p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); |
2312 | p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); |
2313 | p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); |
2314 | p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); |
2315 | p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); |
2316 | } |
2317 | |
2318 | static int __qed_ll2_get_stats(void *cxt, u8 connection_handle, |
2319 | struct qed_ll2_stats *p_stats) |
2320 | { |
2321 | struct qed_hwfn *p_hwfn = cxt; |
2322 | struct qed_ll2_info *p_ll2_conn = NULL; |
2323 | struct qed_ptt *p_ptt; |
2324 | |
2325 | if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) || |
2326 | !p_hwfn->p_ll2_info) |
2327 | return -EINVAL; |
2328 | |
2329 | p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; |
2330 | |
2331 | p_ptt = qed_ptt_acquire(p_hwfn); |
2332 | if (!p_ptt) { |
2333 | DP_ERR(p_hwfn, "Failed to acquire ptt\n" ); |
2334 | return -EINVAL; |
2335 | } |
2336 | |
2337 | if (p_ll2_conn->input.gsi_enable) |
2338 | _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats); |
2339 | |
2340 | _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); |
2341 | |
2342 | _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); |
2343 | |
2344 | if (p_ll2_conn->tx_stats_en) |
2345 | _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); |
2346 | |
2347 | qed_ptt_release(p_hwfn, p_ptt); |
2348 | |
2349 | return 0; |
2350 | } |
2351 | |
2352 | int qed_ll2_get_stats(void *cxt, |
2353 | u8 connection_handle, struct qed_ll2_stats *p_stats) |
2354 | { |
2355 | memset(p_stats, 0, sizeof(*p_stats)); |
2356 | return __qed_ll2_get_stats(cxt, connection_handle, p_stats); |
2357 | } |
2358 | |
2359 | static void qed_ll2b_release_rx_packet(void *cxt, |
2360 | u8 connection_handle, |
2361 | void *cookie, |
2362 | dma_addr_t rx_buf_addr, |
2363 | bool b_last_packet) |
2364 | { |
2365 | struct qed_hwfn *p_hwfn = cxt; |
2366 | |
2367 | qed_ll2_dealloc_buffer(cdev: p_hwfn->cdev, buffer: cookie); |
2368 | } |
2369 | |
2370 | static void qed_ll2_register_cb_ops(struct qed_dev *cdev, |
2371 | const struct qed_ll2_cb_ops *ops, |
2372 | void *cookie) |
2373 | { |
2374 | cdev->ll2->cbs = ops; |
2375 | cdev->ll2->cb_cookie = cookie; |
2376 | } |
2377 | |
2378 | static struct qed_ll2_cbs ll2_cbs = { |
2379 | .rx_comp_cb = &qed_ll2b_complete_rx_packet, |
2380 | .rx_release_cb = &qed_ll2b_release_rx_packet, |
2381 | .tx_comp_cb = &qed_ll2b_complete_tx_packet, |
2382 | .tx_release_cb = &qed_ll2b_complete_tx_packet, |
2383 | }; |
2384 | |
2385 | static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn, |
2386 | struct qed_ll2_acquire_data *data, |
2387 | struct qed_ll2_params *params, |
2388 | enum qed_ll2_conn_type conn_type, |
2389 | u8 *handle, bool lb) |
2390 | { |
2391 | memset(data, 0, sizeof(*data)); |
2392 | |
2393 | data->input.conn_type = conn_type; |
2394 | data->input.mtu = params->mtu; |
2395 | data->input.rx_num_desc = QED_LL2_RX_SIZE; |
2396 | data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; |
2397 | data->input.rx_vlan_removal_en = params->rx_vlan_stripping; |
2398 | data->input.tx_num_desc = QED_LL2_TX_SIZE; |
2399 | data->p_connection_handle = handle; |
2400 | data->cbs = &ll2_cbs; |
2401 | ll2_cbs.cookie = p_hwfn; |
2402 | |
2403 | if (lb) { |
2404 | data->input.tx_tc = PKT_LB_TC; |
2405 | data->input.tx_dest = QED_LL2_TX_DEST_LB; |
2406 | } else { |
2407 | data->input.tx_tc = 0; |
2408 | data->input.tx_dest = QED_LL2_TX_DEST_NW; |
2409 | } |
2410 | } |
2411 | |
2412 | static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn, |
2413 | struct qed_ll2_params *params) |
2414 | { |
2415 | u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; |
2416 | struct qed_ll2_acquire_data data; |
2417 | int rc; |
2418 | |
2419 | qed_ll2_set_conn_data(p_hwfn, data: &data, params, |
2420 | conn_type: QED_LL2_TYPE_OOO, handle, lb: true); |
2421 | |
2422 | rc = qed_ll2_acquire_connection(cxt: p_hwfn, data: &data); |
2423 | if (rc) { |
2424 | DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n" ); |
2425 | goto out; |
2426 | } |
2427 | |
2428 | rc = qed_ll2_establish_connection(cxt: p_hwfn, connection_handle: *handle); |
2429 | if (rc) { |
2430 | DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n" ); |
2431 | goto fail; |
2432 | } |
2433 | |
2434 | return 0; |
2435 | |
2436 | fail: |
2437 | qed_ll2_release_connection(cxt: p_hwfn, connection_handle: *handle); |
2438 | out: |
2439 | *handle = QED_LL2_UNUSED_HANDLE; |
2440 | return rc; |
2441 | } |
2442 | |
2443 | static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev) |
2444 | { |
2445 | return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) || |
2446 | QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) || |
2447 | QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) && |
2448 | (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev)); |
2449 | } |
2450 | |
2451 | static int __qed_ll2_stop(struct qed_hwfn *p_hwfn) |
2452 | { |
2453 | struct qed_dev *cdev = p_hwfn->cdev; |
2454 | int rc; |
2455 | |
2456 | rc = qed_ll2_terminate_connection(cxt: p_hwfn, connection_handle: cdev->ll2->handle); |
2457 | if (rc) |
2458 | DP_INFO(cdev, "Failed to terminate LL2 connection\n" ); |
2459 | |
2460 | qed_ll2_release_connection(cxt: p_hwfn, connection_handle: cdev->ll2->handle); |
2461 | |
2462 | return rc; |
2463 | } |
2464 | |
2465 | static int qed_ll2_stop(struct qed_dev *cdev) |
2466 | { |
2467 | bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); |
2468 | struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); |
2469 | int rc = 0, rc2 = 0; |
2470 | |
2471 | if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE) |
2472 | return 0; |
2473 | if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) |
2474 | qed_llh_remove_mac_filter(cdev, ppfid: 0, mac_addr: cdev->ll2_mac_address); |
2475 | |
2476 | qed_llh_remove_mac_filter(cdev, ppfid: 0, mac_addr: cdev->ll2_mac_address); |
2477 | eth_zero_addr(addr: cdev->ll2_mac_address); |
2478 | |
2479 | if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) |
2480 | qed_ll2_stop_ooo(p_hwfn); |
2481 | |
2482 | /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ |
2483 | if (b_is_storage_eng1) { |
2484 | rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev)); |
2485 | if (rc2) |
2486 | DP_NOTICE(QED_LEADING_HWFN(cdev), |
2487 | "Failed to stop LL2 on engine 0\n" ); |
2488 | } |
2489 | |
2490 | rc = __qed_ll2_stop(p_hwfn); |
2491 | if (rc) |
2492 | DP_NOTICE(p_hwfn, "Failed to stop LL2\n" ); |
2493 | |
2494 | qed_ll2_kill_buffers(cdev); |
2495 | |
2496 | cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; |
2497 | |
2498 | return rc | rc2; |
2499 | } |
2500 | |
2501 | static int __qed_ll2_start(struct qed_hwfn *p_hwfn, |
2502 | struct qed_ll2_params *params) |
2503 | { |
2504 | struct qed_ll2_buffer *buffer, *tmp_buffer; |
2505 | struct qed_dev *cdev = p_hwfn->cdev; |
2506 | enum qed_ll2_conn_type conn_type; |
2507 | struct qed_ll2_acquire_data data; |
2508 | int rc, rx_cnt; |
2509 | |
2510 | switch (p_hwfn->hw_info.personality) { |
2511 | case QED_PCI_FCOE: |
2512 | conn_type = QED_LL2_TYPE_FCOE; |
2513 | break; |
2514 | case QED_PCI_ISCSI: |
2515 | case QED_PCI_NVMETCP: |
2516 | conn_type = QED_LL2_TYPE_TCP_ULP; |
2517 | break; |
2518 | case QED_PCI_ETH_ROCE: |
2519 | conn_type = QED_LL2_TYPE_ROCE; |
2520 | break; |
2521 | default: |
2522 | |
2523 | conn_type = QED_LL2_TYPE_TEST; |
2524 | } |
2525 | |
2526 | qed_ll2_set_conn_data(p_hwfn, data: &data, params, conn_type, |
2527 | handle: &cdev->ll2->handle, lb: false); |
2528 | |
2529 | rc = qed_ll2_acquire_connection(cxt: p_hwfn, data: &data); |
2530 | if (rc) { |
2531 | DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n" ); |
2532 | return rc; |
2533 | } |
2534 | |
2535 | rc = qed_ll2_establish_connection(cxt: p_hwfn, connection_handle: cdev->ll2->handle); |
2536 | if (rc) { |
2537 | DP_INFO(p_hwfn, "Failed to establish LL2 connection\n" ); |
2538 | goto release_conn; |
2539 | } |
2540 | |
2541 | /* Post all Rx buffers to FW */ |
2542 | spin_lock_bh(lock: &cdev->ll2->lock); |
2543 | rx_cnt = cdev->ll2->rx_cnt; |
2544 | list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) { |
2545 | rc = qed_ll2_post_rx_buffer(cxt: p_hwfn, |
2546 | connection_handle: cdev->ll2->handle, |
2547 | addr: buffer->phys_addr, buf_len: 0, cookie: buffer, notify_fw: 1); |
2548 | if (rc) { |
2549 | DP_INFO(p_hwfn, |
2550 | "Failed to post an Rx buffer; Deleting it\n" ); |
2551 | dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, |
2552 | cdev->ll2->rx_size, DMA_FROM_DEVICE); |
2553 | kfree(objp: buffer->data); |
2554 | list_del(entry: &buffer->list); |
2555 | kfree(objp: buffer); |
2556 | } else { |
2557 | rx_cnt++; |
2558 | } |
2559 | } |
2560 | spin_unlock_bh(lock: &cdev->ll2->lock); |
2561 | |
2562 | if (rx_cnt == cdev->ll2->rx_cnt) { |
2563 | DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n" ); |
2564 | goto terminate_conn; |
2565 | } |
2566 | cdev->ll2->rx_cnt = rx_cnt; |
2567 | |
2568 | return 0; |
2569 | |
2570 | terminate_conn: |
2571 | qed_ll2_terminate_connection(cxt: p_hwfn, connection_handle: cdev->ll2->handle); |
2572 | release_conn: |
2573 | qed_ll2_release_connection(cxt: p_hwfn, connection_handle: cdev->ll2->handle); |
2574 | return rc; |
2575 | } |
2576 | |
2577 | static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) |
2578 | { |
2579 | bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); |
2580 | struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); |
2581 | struct qed_ll2_buffer *buffer; |
2582 | int rx_num_desc, i, rc; |
2583 | |
2584 | if (!is_valid_ether_addr(addr: params->ll2_mac_address)) { |
2585 | DP_NOTICE(cdev, "Invalid Ethernet address\n" ); |
2586 | return -EINVAL; |
2587 | } |
2588 | |
2589 | WARN_ON(!cdev->ll2->cbs); |
2590 | |
2591 | /* Initialize LL2 locks & lists */ |
2592 | INIT_LIST_HEAD(list: &cdev->ll2->list); |
2593 | spin_lock_init(&cdev->ll2->lock); |
2594 | |
2595 | cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN + |
2596 | L1_CACHE_BYTES + params->mtu; |
2597 | |
2598 | /* Allocate memory for LL2. |
2599 | * In CMT mode, in case of a storage PF which is affintized to engine 1, |
2600 | * LL2 is started also on engine 0 and thus we need twofold buffers. |
2601 | */ |
2602 | rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1); |
2603 | DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n" , |
2604 | rx_num_desc, cdev->ll2->rx_size); |
2605 | for (i = 0; i < rx_num_desc; i++) { |
2606 | buffer = kzalloc(size: sizeof(*buffer), GFP_KERNEL); |
2607 | if (!buffer) { |
2608 | DP_INFO(cdev, "Failed to allocate LL2 buffers\n" ); |
2609 | rc = -ENOMEM; |
2610 | goto err0; |
2611 | } |
2612 | |
2613 | rc = qed_ll2_alloc_buffer(cdev, data: (u8 **)&buffer->data, |
2614 | phys_addr: &buffer->phys_addr); |
2615 | if (rc) { |
2616 | kfree(objp: buffer); |
2617 | goto err0; |
2618 | } |
2619 | |
2620 | list_add_tail(new: &buffer->list, head: &cdev->ll2->list); |
2621 | } |
2622 | |
2623 | rc = __qed_ll2_start(p_hwfn, params); |
2624 | if (rc) { |
2625 | DP_NOTICE(cdev, "Failed to start LL2\n" ); |
2626 | goto err0; |
2627 | } |
2628 | |
2629 | /* In CMT mode, always need to start LL2 on engine 0 for a storage PF, |
2630 | * since broadcast/mutlicast packets are routed to engine 0. |
2631 | */ |
2632 | if (b_is_storage_eng1) { |
2633 | rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params); |
2634 | if (rc) { |
2635 | DP_NOTICE(QED_LEADING_HWFN(cdev), |
2636 | "Failed to start LL2 on engine 0\n" ); |
2637 | goto err1; |
2638 | } |
2639 | } |
2640 | |
2641 | if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { |
2642 | DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n" ); |
2643 | rc = qed_ll2_start_ooo(p_hwfn, params); |
2644 | if (rc) { |
2645 | DP_NOTICE(cdev, "Failed to start OOO LL2\n" ); |
2646 | goto err2; |
2647 | } |
2648 | } |
2649 | |
2650 | if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { |
2651 | rc = qed_llh_add_mac_filter(cdev, ppfid: 0, mac_addr: params->ll2_mac_address); |
2652 | if (rc) { |
2653 | DP_NOTICE(cdev, "Failed to add an LLH filter\n" ); |
2654 | goto err3; |
2655 | } |
2656 | } |
2657 | |
2658 | ether_addr_copy(dst: cdev->ll2_mac_address, src: params->ll2_mac_address); |
2659 | |
2660 | return 0; |
2661 | |
2662 | err3: |
2663 | if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) |
2664 | qed_ll2_stop_ooo(p_hwfn); |
2665 | err2: |
2666 | if (b_is_storage_eng1) |
2667 | __qed_ll2_stop(QED_LEADING_HWFN(cdev)); |
2668 | err1: |
2669 | __qed_ll2_stop(p_hwfn); |
2670 | err0: |
2671 | qed_ll2_kill_buffers(cdev); |
2672 | cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; |
2673 | return rc; |
2674 | } |
2675 | |
2676 | static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, |
2677 | unsigned long xmit_flags) |
2678 | { |
2679 | struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); |
2680 | struct qed_ll2_tx_pkt_info pkt; |
2681 | const skb_frag_t *frag; |
2682 | u8 flags = 0, nr_frags; |
2683 | int rc = -EINVAL, i; |
2684 | dma_addr_t mapping; |
2685 | u16 vlan = 0; |
2686 | |
2687 | if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { |
2688 | DP_INFO(cdev, "Cannot transmit a checksummed packet\n" ); |
2689 | return -EINVAL; |
2690 | } |
2691 | |
2692 | /* Cache number of fragments from SKB since SKB may be freed by |
2693 | * the completion routine after calling qed_ll2_prepare_tx_packet() |
2694 | */ |
2695 | nr_frags = skb_shinfo(skb)->nr_frags; |
2696 | |
2697 | if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) { |
2698 | DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n" , |
2699 | 1 + nr_frags); |
2700 | return -EINVAL; |
2701 | } |
2702 | |
2703 | mapping = dma_map_single(&cdev->pdev->dev, skb->data, |
2704 | skb->len, DMA_TO_DEVICE); |
2705 | if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { |
2706 | DP_NOTICE(cdev, "SKB mapping failed\n" ); |
2707 | return -EINVAL; |
2708 | } |
2709 | |
2710 | /* Request HW to calculate IP csum */ |
2711 | if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && |
2712 | ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) |
2713 | flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); |
2714 | |
2715 | if (skb_vlan_tag_present(skb)) { |
2716 | vlan = skb_vlan_tag_get(skb); |
2717 | flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); |
2718 | } |
2719 | |
2720 | memset(&pkt, 0, sizeof(pkt)); |
2721 | pkt.num_of_bds = 1 + nr_frags; |
2722 | pkt.vlan = vlan; |
2723 | pkt.bd_flags = flags; |
2724 | pkt.tx_dest = QED_LL2_TX_DEST_NW; |
2725 | pkt.first_frag = mapping; |
2726 | pkt.first_frag_len = skb->len; |
2727 | pkt.cookie = skb; |
2728 | if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && |
2729 | test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) |
2730 | pkt.remove_stag = true; |
2731 | |
2732 | /* qed_ll2_prepare_tx_packet() may actually send the packet if |
2733 | * there are no fragments in the skb and subsequently the completion |
2734 | * routine may run and free the SKB, so no dereferencing the SKB |
2735 | * beyond this point unless skb has any fragments. |
2736 | */ |
2737 | rc = qed_ll2_prepare_tx_packet(cxt: p_hwfn, connection_handle: cdev->ll2->handle, |
2738 | pkt: &pkt, notify_fw: 1); |
2739 | if (unlikely(rc)) |
2740 | goto err; |
2741 | |
2742 | for (i = 0; i < nr_frags; i++) { |
2743 | frag = &skb_shinfo(skb)->frags[i]; |
2744 | |
2745 | mapping = skb_frag_dma_map(dev: &cdev->pdev->dev, frag, offset: 0, |
2746 | size: skb_frag_size(frag), dir: DMA_TO_DEVICE); |
2747 | |
2748 | if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { |
2749 | DP_NOTICE(cdev, |
2750 | "Unable to map frag - dropping packet\n" ); |
2751 | rc = -ENOMEM; |
2752 | goto err; |
2753 | } |
2754 | |
2755 | rc = qed_ll2_set_fragment_of_tx_packet(cxt: p_hwfn, |
2756 | connection_handle: cdev->ll2->handle, |
2757 | addr: mapping, |
2758 | nbytes: skb_frag_size(frag)); |
2759 | |
2760 | /* if failed not much to do here, partial packet has been posted |
2761 | * we can't free memory, will need to wait for completion |
2762 | */ |
2763 | if (unlikely(rc)) |
2764 | goto err2; |
2765 | } |
2766 | |
2767 | return 0; |
2768 | |
2769 | err: |
2770 | dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE); |
2771 | err2: |
2772 | return rc; |
2773 | } |
2774 | |
2775 | static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) |
2776 | { |
2777 | bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); |
2778 | struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); |
2779 | int rc; |
2780 | |
2781 | if (!cdev->ll2) |
2782 | return -EINVAL; |
2783 | |
2784 | rc = qed_ll2_get_stats(cxt: p_hwfn, connection_handle: cdev->ll2->handle, p_stats: stats); |
2785 | if (rc) { |
2786 | DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n" ); |
2787 | return rc; |
2788 | } |
2789 | |
2790 | /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ |
2791 | if (b_is_storage_eng1) { |
2792 | rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev), |
2793 | connection_handle: cdev->ll2->handle, p_stats: stats); |
2794 | if (rc) { |
2795 | DP_NOTICE(QED_LEADING_HWFN(cdev), |
2796 | "Failed to get LL2 stats on engine 0\n" ); |
2797 | return rc; |
2798 | } |
2799 | } |
2800 | |
2801 | return 0; |
2802 | } |
2803 | |
2804 | const struct qed_ll2_ops qed_ll2_ops_pass = { |
2805 | .start = &qed_ll2_start, |
2806 | .stop = &qed_ll2_stop, |
2807 | .start_xmit = &qed_ll2_start_xmit, |
2808 | .register_cb_ops = &qed_ll2_register_cb_ops, |
2809 | .get_stats = &qed_ll2_stats, |
2810 | }; |
2811 | |
2812 | int qed_ll2_alloc_if(struct qed_dev *cdev) |
2813 | { |
2814 | cdev->ll2 = kzalloc(size: sizeof(*cdev->ll2), GFP_KERNEL); |
2815 | return cdev->ll2 ? 0 : -ENOMEM; |
2816 | } |
2817 | |
2818 | void qed_ll2_dealloc_if(struct qed_dev *cdev) |
2819 | { |
2820 | kfree(objp: cdev->ll2); |
2821 | cdev->ll2 = NULL; |
2822 | } |
2823 | |