1 | /* |
2 | * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet |
3 | * driver for Linux. |
4 | * |
5 | * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. |
6 | * |
7 | * This software is available to you under a choice of one of two |
8 | * licenses. You may choose to be licensed under the terms of the GNU |
9 | * General Public License (GPL) Version 2, available from the file |
10 | * COPYING in the main directory of this source tree, or the |
11 | * OpenIB.org BSD license below: |
12 | * |
13 | * Redistribution and use in source and binary forms, with or |
14 | * without modification, are permitted provided that the following |
15 | * conditions are met: |
16 | * |
17 | * - Redistributions of source code must retain the above |
18 | * copyright notice, this list of conditions and the following |
19 | * disclaimer. |
20 | * |
21 | * - Redistributions in binary form must reproduce the above |
22 | * copyright notice, this list of conditions and the following |
23 | * disclaimer in the documentation and/or other materials |
24 | * provided with the distribution. |
25 | * |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
33 | * SOFTWARE. |
34 | */ |
35 | |
36 | #include <linux/skbuff.h> |
37 | #include <linux/netdevice.h> |
38 | #include <linux/etherdevice.h> |
39 | #include <linux/if_vlan.h> |
40 | #include <linux/ip.h> |
41 | #include <net/ipv6.h> |
42 | #include <net/tcp.h> |
43 | #include <linux/dma-mapping.h> |
44 | #include <linux/prefetch.h> |
45 | |
46 | #include "t4vf_common.h" |
47 | #include "t4vf_defs.h" |
48 | |
49 | #include "../cxgb4/t4_regs.h" |
50 | #include "../cxgb4/t4_values.h" |
51 | #include "../cxgb4/t4fw_api.h" |
52 | #include "../cxgb4/t4_msg.h" |
53 | |
54 | /* |
55 | * Constants ... |
56 | */ |
57 | enum { |
58 | /* |
	 * Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Queue Units (EQ_UNIT bytes). Note that as far as the hardware is
61 | * concerned, the free list is an Egress Queue (the host produces free |
62 | * buffers which the hardware consumes) and free list entries are |
63 | * 64-bit PCI DMA addresses. |
64 | */ |
65 | EQ_UNIT = SGE_EQ_IDXSIZE, |
66 | FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), |
67 | TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), |
68 | |
69 | /* |
70 | * Max number of TX descriptors we clean up at a time. Should be |
71 | * modest as freeing skbs isn't cheap and it happens while holding |
	 * locks. We just need to free packets faster than they arrive; we
73 | * eventually catch up and keep the amortized cost reasonable. |
74 | */ |
75 | MAX_TX_RECLAIM = 16, |
76 | |
77 | /* |
78 | * Max number of Rx buffers we replenish at a time. Again keep this |
79 | * modest, allocating buffers isn't cheap either. |
80 | */ |
81 | MAX_RX_REFILL = 16, |
82 | |
83 | /* |
	 * Period of the RX queue check timer. This timer runs infrequently
	 * since it only has work to do when the system experiences a severe
	 * memory shortage.
87 | */ |
88 | RX_QCHECK_PERIOD = (HZ / 2), |
89 | |
90 | /* |
91 | * Period of the TX queue check timer and the maximum number of TX |
92 | * descriptors to be reclaimed by the TX timer. |
93 | */ |
94 | TX_QCHECK_PERIOD = (HZ / 2), |
95 | MAX_TIMER_TX_RECLAIM = 100, |
96 | |
97 | /* |
98 | * Suspend an Ethernet TX queue with fewer available descriptors than |
99 | * this. We always want to have room for a maximum sized packet: |
100 | * inline immediate data + MAX_SKB_FRAGS. This is the same as |
101 | * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS |
102 | * (see that function and its helpers for a description of the |
103 | * calculation). |
104 | */ |
105 | ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, |
106 | ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + |
107 | ((ETHTXQ_MAX_FRAGS-1) & 1) + |
108 | 2), |
109 | ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + |
110 | sizeof(struct cpl_tx_pkt_lso_core) + |
111 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), |
112 | ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, |
113 | |
114 | ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), |
115 | |
116 | /* |
117 | * Max TX descriptor space we allow for an Ethernet packet to be |
118 | * inlined into a WR. This is limited by the maximum value which |
119 | * we can specify for immediate data in the firmware Ethernet TX |
120 | * Work Request. |
121 | */ |
122 | MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M, |
123 | |
124 | /* |
125 | * Max size of a WR sent through a control TX queue. |
126 | */ |
127 | MAX_CTRL_WR_LEN = 256, |
128 | |
129 | /* |
130 | * Maximum amount of data which we'll ever need to inline into a |
131 | * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). |
132 | */ |
133 | MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN |
134 | ? MAX_IMM_TX_PKT_LEN |
135 | : MAX_CTRL_WR_LEN), |
136 | |
137 | /* |
138 | * For incoming packets less than RX_COPY_THRES, we copy the data into |
139 | * an skb rather than referencing the data. We allocate enough |
	 * in-line room in skbs to accommodate pulling in RX_PULL_LEN bytes
141 | * of the data (header). |
142 | */ |
143 | RX_COPY_THRES = 256, |
144 | RX_PULL_LEN = 128, |
145 | |
146 | /* |
147 | * Main body length for sk_buffs used for RX Ethernet packets with |
148 | * fragments. Should be >= RX_PULL_LEN but possibly bigger to give |
149 | * pskb_may_pull() some room. |
150 | */ |
151 | RX_SKB_LEN = 512, |
152 | }; |
153 | |
154 | /* |
155 | * Software state per TX descriptor. |
156 | */ |
157 | struct tx_sw_desc { |
158 | struct sk_buff *skb; /* socket buffer of TX data source */ |
159 | struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ |
160 | }; |
161 | |
162 | /* |
163 | * Software state per RX Free List descriptor. We keep track of the allocated |
164 | * FL page, its size, and its PCI DMA address (if the page is mapped). The FL |
165 | * page size and its PCI DMA mapped state are stored in the low bits of the |
166 | * PCI DMA address as per below. |
167 | */ |
168 | struct rx_sw_desc { |
169 | struct page *page; /* Free List page buffer */ |
170 | dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ |
171 | /* and flags (see below) */ |
172 | }; |
173 | |
174 | /* |
175 | * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the |
176 | * SGE also uses the low 4 bits to determine the size of the buffer. It uses |
177 | * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. |
178 | * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 |
179 | * bits can only contain a 0 or a 1 to indicate which size buffer we're giving |
180 | * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is |
181 | * maintained in an inverse sense so the hardware never sees that bit high. |
182 | */ |
183 | enum { |
184 | RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ |
185 | RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ |
186 | }; |
187 | |
188 | /** |
189 | * get_buf_addr - return DMA buffer address of software descriptor |
190 | * @sdesc: pointer to the software buffer descriptor |
191 | * |
192 | * Return the DMA buffer address of a software descriptor (stripping out |
193 | * our low-order flag bits). |
194 | */ |
195 | static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) |
196 | { |
197 | return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); |
198 | } |
199 | |
200 | /** |
201 | * is_buf_mapped - is buffer mapped for DMA? |
202 | * @sdesc: pointer to the software buffer descriptor |
203 | * |
 * Determine whether the buffer associated with a software descriptor
 * is mapped for DMA or not.
206 | */ |
207 | static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc) |
208 | { |
209 | return !(sdesc->dma_addr & RX_UNMAPPED_BUF); |
210 | } |
211 | |
212 | /** |
213 | * need_skb_unmap - does the platform need unmapping of sk_buffs? |
214 | * |
 * Returns true if the platform needs sk_buff unmapping. The compiler
 * optimizes away unnecessary code if this returns a constant false.
217 | */ |
218 | static inline int need_skb_unmap(void) |
219 | { |
220 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
221 | return 1; |
222 | #else |
223 | return 0; |
224 | #endif |
225 | } |
226 | |
227 | /** |
228 | * txq_avail - return the number of available slots in a TX queue |
229 | * @tq: the TX queue |
230 | * |
231 | * Returns the number of available descriptors in a TX queue. |
232 | */ |
233 | static inline unsigned int txq_avail(const struct sge_txq *tq) |
234 | { |
235 | return tq->size - 1 - tq->in_use; |
236 | } |
237 | |
238 | /** |
239 | * fl_cap - return the capacity of a Free List |
240 | * @fl: the Free List |
241 | * |
242 | * Returns the capacity of a Free List. The capacity is less than the |
243 | * size because an Egress Queue Index Unit worth of descriptors needs to |
244 | * be left unpopulated, otherwise the Producer and Consumer indices PIDX |
245 | * and CIDX will match and the hardware will think the FL is empty. |
246 | */ |
247 | static inline unsigned int fl_cap(const struct sge_fl *fl) |
248 | { |
249 | return fl->size - FL_PER_EQ_UNIT; |
250 | } |
251 | |
252 | /** |
253 | * fl_starving - return whether a Free List is starving. |
254 | * @adapter: pointer to the adapter |
255 | * @fl: the Free List |
256 | * |
257 | * Tests specified Free List to see whether the number of buffers |
 * available to the hardware has fallen below our "starvation"
259 | * threshold. |
260 | */ |
261 | static inline bool fl_starving(const struct adapter *adapter, |
262 | const struct sge_fl *fl) |
263 | { |
264 | const struct sge *s = &adapter->sge; |
265 | |
266 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; |
267 | } |
268 | |
269 | /** |
270 | * map_skb - map an skb for DMA to the device |
271 | * @dev: the egress net device |
272 | * @skb: the packet to map |
273 | * @addr: a pointer to the base of the DMA mapping array |
274 | * |
275 | * Map an skb for DMA to the device and return an array of DMA addresses. |
276 | */ |
277 | static int map_skb(struct device *dev, const struct sk_buff *skb, |
278 | dma_addr_t *addr) |
279 | { |
280 | const skb_frag_t *fp, *end; |
281 | const struct skb_shared_info *si; |
282 | |
283 | *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); |
	if (dma_mapping_error(dev, *addr))
285 | goto out_err; |
286 | |
287 | si = skb_shinfo(skb); |
288 | end = &si->frags[si->nr_frags]; |
289 | for (fp = si->frags; fp < end; fp++) { |
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
293 | goto unwind; |
294 | } |
295 | return 0; |
296 | |
297 | unwind: |
298 | while (fp-- > si->frags) |
299 | dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); |
300 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); |
301 | |
302 | out_err: |
303 | return -ENOMEM; |
304 | } |
305 | |
306 | static void unmap_sgl(struct device *dev, const struct sk_buff *skb, |
307 | const struct ulptx_sgl *sgl, const struct sge_txq *tq) |
308 | { |
309 | const struct ulptx_sge_pair *p; |
310 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; |
311 | |
312 | if (likely(skb_headlen(skb))) |
313 | dma_unmap_single(dev, be64_to_cpu(sgl->addr0), |
314 | be32_to_cpu(sgl->len0), DMA_TO_DEVICE); |
315 | else { |
316 | dma_unmap_page(dev, be64_to_cpu(sgl->addr0), |
317 | be32_to_cpu(sgl->len0), DMA_TO_DEVICE); |
318 | nfrags--; |
319 | } |
320 | |
321 | /* |
322 | * the complexity below is because of the possibility of a wrap-around |
323 | * in the middle of an SGL |
324 | */ |
325 | for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { |
326 | if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { |
327 | unmap: |
328 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), |
329 | be32_to_cpu(p->len[0]), DMA_TO_DEVICE); |
330 | dma_unmap_page(dev, be64_to_cpu(p->addr[1]), |
331 | be32_to_cpu(p->len[1]), DMA_TO_DEVICE); |
332 | p++; |
333 | } else if ((u8 *)p == (u8 *)tq->stat) { |
334 | p = (const struct ulptx_sge_pair *)tq->desc; |
335 | goto unmap; |
336 | } else if ((u8 *)p + 8 == (u8 *)tq->stat) { |
337 | const __be64 *addr = (const __be64 *)tq->desc; |
338 | |
339 | dma_unmap_page(dev, be64_to_cpu(addr[0]), |
340 | be32_to_cpu(p->len[0]), DMA_TO_DEVICE); |
341 | dma_unmap_page(dev, be64_to_cpu(addr[1]), |
342 | be32_to_cpu(p->len[1]), DMA_TO_DEVICE); |
343 | p = (const struct ulptx_sge_pair *)&addr[2]; |
344 | } else { |
345 | const __be64 *addr = (const __be64 *)tq->desc; |
346 | |
347 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), |
348 | be32_to_cpu(p->len[0]), DMA_TO_DEVICE); |
349 | dma_unmap_page(dev, be64_to_cpu(addr[0]), |
350 | be32_to_cpu(p->len[1]), DMA_TO_DEVICE); |
351 | p = (const struct ulptx_sge_pair *)&addr[1]; |
352 | } |
353 | } |
354 | if (nfrags) { |
355 | __be64 addr; |
356 | |
357 | if ((u8 *)p == (u8 *)tq->stat) |
358 | p = (const struct ulptx_sge_pair *)tq->desc; |
359 | addr = ((u8 *)p + 16 <= (u8 *)tq->stat |
360 | ? p->addr[0] |
361 | : *(const __be64 *)tq->desc); |
362 | dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), |
363 | DMA_TO_DEVICE); |
364 | } |
365 | } |
366 | |
367 | /** |
368 | * free_tx_desc - reclaims TX descriptors and their buffers |
369 | * @adapter: the adapter |
370 | * @tq: the TX queue to reclaim descriptors from |
371 | * @n: the number of descriptors to reclaim |
372 | * @unmap: whether the buffers should be unmapped for DMA |
373 | * |
374 | * Reclaims TX descriptors from an SGE TX queue and frees the associated |
375 | * TX buffers. Called with the TX queue lock held. |
376 | */ |
377 | static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, |
378 | unsigned int n, bool unmap) |
379 | { |
380 | struct tx_sw_desc *sdesc; |
381 | unsigned int cidx = tq->cidx; |
382 | struct device *dev = adapter->pdev_dev; |
383 | |
384 | const int need_unmap = need_skb_unmap() && unmap; |
385 | |
386 | sdesc = &tq->sdesc[cidx]; |
387 | while (n--) { |
388 | /* |
389 | * If we kept a reference to the original TX skb, we need to |
390 | * unmap it from PCI DMA space (if required) and free it. |
391 | */ |
392 | if (sdesc->skb) { |
393 | if (need_unmap) |
				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
			dev_consume_skb_any(sdesc->skb);
396 | sdesc->skb = NULL; |
397 | } |
398 | |
399 | sdesc++; |
400 | if (++cidx == tq->size) { |
401 | cidx = 0; |
402 | sdesc = tq->sdesc; |
403 | } |
404 | } |
405 | tq->cidx = cidx; |
406 | } |
407 | |
408 | /* |
409 | * Return the number of reclaimable descriptors in a TX queue. |
410 | */ |
411 | static inline int reclaimable(const struct sge_txq *tq) |
412 | { |
413 | int hw_cidx = be16_to_cpu(tq->stat->cidx); |
414 | int reclaimable = hw_cidx - tq->cidx; |
415 | if (reclaimable < 0) |
416 | reclaimable += tq->size; |
417 | return reclaimable; |
418 | } |
419 | |
420 | /** |
421 | * reclaim_completed_tx - reclaims completed TX descriptors |
422 | * @adapter: the adapter |
423 | * @tq: the TX queue to reclaim completed descriptors from |
424 | * @unmap: whether the buffers should be unmapped for DMA |
425 | * |
426 | * Reclaims TX descriptors that the SGE has indicated it has processed, |
427 | * and frees the associated buffers if possible. Called with the TX |
428 | * queue locked. |
429 | */ |
430 | static inline void reclaim_completed_tx(struct adapter *adapter, |
431 | struct sge_txq *tq, |
432 | bool unmap) |
433 | { |
434 | int avail = reclaimable(tq); |
435 | |
436 | if (avail) { |
437 | /* |
438 | * Limit the amount of clean up work we do at a time to keep |
439 | * the TX lock hold time O(1). |
440 | */ |
441 | if (avail > MAX_TX_RECLAIM) |
442 | avail = MAX_TX_RECLAIM; |
443 | |
		free_tx_desc(adapter, tq, avail, unmap);
445 | tq->in_use -= avail; |
446 | } |
447 | } |
448 | |
449 | /** |
450 | * get_buf_size - return the size of an RX Free List buffer. |
451 | * @adapter: pointer to the associated adapter |
452 | * @sdesc: pointer to the software buffer descriptor |
453 | */ |
454 | static inline int get_buf_size(const struct adapter *adapter, |
455 | const struct rx_sw_desc *sdesc) |
456 | { |
457 | const struct sge *s = &adapter->sge; |
458 | |
459 | return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) |
460 | ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); |
461 | } |
462 | |
463 | /** |
464 | * free_rx_bufs - free RX buffers on an SGE Free List |
465 | * @adapter: the adapter |
466 | * @fl: the SGE Free List to free buffers from |
467 | * @n: how many buffers to free |
468 | * |
469 | * Release the next @n buffers on an SGE Free List RX queue. The |
470 | * buffers must be made inaccessible to hardware before calling this |
471 | * function. |
472 | */ |
473 | static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) |
474 | { |
475 | while (n--) { |
476 | struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; |
477 | |
478 | if (is_buf_mapped(sdesc)) |
479 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), |
480 | get_buf_size(adapter, sdesc), |
481 | DMA_FROM_DEVICE); |
		put_page(sdesc->page);
483 | sdesc->page = NULL; |
484 | if (++fl->cidx == fl->size) |
485 | fl->cidx = 0; |
486 | fl->avail--; |
487 | } |
488 | } |
489 | |
490 | /** |
491 | * unmap_rx_buf - unmap the current RX buffer on an SGE Free List |
492 | * @adapter: the adapter |
493 | * @fl: the SGE Free List |
494 | * |
495 | * Unmap the current buffer on an SGE Free List RX queue. The |
496 | * buffer must be made inaccessible to HW before calling this function. |
497 | * |
498 | * This is similar to @free_rx_bufs above but does not free the buffer. |
499 | * Do note that the FL still loses any further access to the buffer. |
500 | * This is used predominantly to "transfer ownership" of an FL buffer |
501 | * to another entity (typically an skb's fragment list). |
502 | */ |
503 | static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) |
504 | { |
505 | struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; |
506 | |
507 | if (is_buf_mapped(sdesc)) |
508 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), |
509 | get_buf_size(adapter, sdesc), |
510 | DMA_FROM_DEVICE); |
511 | sdesc->page = NULL; |
512 | if (++fl->cidx == fl->size) |
513 | fl->cidx = 0; |
514 | fl->avail--; |
515 | } |
516 | |
517 | /** |
 * ring_fl_db - ring doorbell on free list
519 | * @adapter: the adapter |
520 | * @fl: the Free List whose doorbell should be rung ... |
521 | * |
522 | * Tell the Scatter Gather Engine that there are new free list entries |
523 | * available. |
524 | */ |
525 | static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) |
526 | { |
527 | u32 val = adapter->params.arch.sge_fl_db; |
528 | |
529 | /* The SGE keeps track of its Producer and Consumer Indices in terms |
530 | * of Egress Queue Units so we can only tell it about integral numbers |
531 | * of multiples of Free List Entries per Egress Queue Units ... |
532 | */ |
533 | if (fl->pend_cred >= FL_PER_EQ_UNIT) { |
		if (is_t4(adapter->params.chip))
535 | val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); |
536 | else |
537 | val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT); |
538 | |
539 | /* Make sure all memory writes to the Free List queue are |
540 | * committed before we tell the hardware about them. |
541 | */ |
542 | wmb(); |
543 | |
544 | /* If we don't have access to the new User Doorbell (T5+), use |
545 | * the old doorbell mechanism; otherwise use the new BAR2 |
546 | * mechanism. |
547 | */ |
548 | if (unlikely(fl->bar2_addr == NULL)) { |
549 | t4_write_reg(adapter, |
550 | T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, |
551 | QID_V(fl->cntxt_id) | val); |
552 | } else { |
			writel(val | QID_V(fl->bar2_qid),
			       fl->bar2_addr + SGE_UDB_KDOORBELL);
555 | |
			/* This Write Memory Barrier will force the write to
557 | * the User Doorbell area to be flushed. |
558 | */ |
559 | wmb(); |
560 | } |
561 | fl->pend_cred %= FL_PER_EQ_UNIT; |
562 | } |
563 | } |
564 | |
565 | /** |
566 | * set_rx_sw_desc - initialize software RX buffer descriptor |
 * @sdesc: pointer to the software RX buffer descriptor
568 | * @page: pointer to the page data structure backing the RX buffer |
569 | * @dma_addr: PCI DMA address (possibly with low-bit flags) |
570 | */ |
571 | static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page, |
572 | dma_addr_t dma_addr) |
573 | { |
574 | sdesc->page = page; |
575 | sdesc->dma_addr = dma_addr; |
576 | } |
577 | |
578 | /* |
579 | * Support for poisoning RX buffers ... |
580 | */ |
581 | #define POISON_BUF_VAL -1 |
582 | |
583 | static inline void poison_buf(struct page *page, size_t sz) |
584 | { |
585 | #if POISON_BUF_VAL >= 0 |
586 | memset(page_address(page), POISON_BUF_VAL, sz); |
587 | #endif |
588 | } |
589 | |
590 | /** |
591 | * refill_fl - refill an SGE RX buffer ring |
592 | * @adapter: the adapter |
593 | * @fl: the Free List ring to refill |
594 | * @n: the number of new buffers to allocate |
595 | * @gfp: the gfp flags for the allocations |
596 | * |
597 | * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, |
598 | * allocated with the supplied gfp flags. The caller must assure that |
599 | * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN |
600 | * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number |
601 | * of buffers allocated. If afterwards the queue is found critically low, |
602 | * mark it as starving in the bitmap of starving FLs. |
603 | */ |
604 | static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, |
605 | int n, gfp_t gfp) |
606 | { |
607 | struct sge *s = &adapter->sge; |
608 | struct page *page; |
609 | dma_addr_t dma_addr; |
610 | unsigned int cred = fl->avail; |
611 | __be64 *d = &fl->desc[fl->pidx]; |
612 | struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; |
613 | |
614 | /* |
615 | * Sanity: ensure that the result of adding n Free List buffers |
616 | * won't result in wrapping the SGE's Producer Index around to |
617 | * it's Consumer Index thereby indicating an empty Free List ... |
618 | */ |
619 | BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); |
620 | |
621 | gfp |= __GFP_NOWARN; |
622 | |
623 | /* |
624 | * If we support large pages, prefer large buffers and fail over to |
625 | * small pages if we can't allocate large pages to satisfy the refill. |
626 | * If we don't support large pages, drop directly into the small page |
627 | * allocation code. |
628 | */ |
629 | if (s->fl_pg_order == 0) |
630 | goto alloc_small_pages; |
631 | |
632 | while (n) { |
		page = __dev_alloc_pages(gfp, s->fl_pg_order);
634 | if (unlikely(!page)) { |
635 | /* |
			 * We've failed in our attempt to allocate a "large
637 | * page". Fail over to the "small page" allocation |
638 | * below. |
639 | */ |
640 | fl->large_alloc_failed++; |
641 | break; |
642 | } |
643 | poison_buf(page, PAGE_SIZE << s->fl_pg_order); |
644 | |
645 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, |
646 | PAGE_SIZE << s->fl_pg_order, |
647 | DMA_FROM_DEVICE); |
648 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { |
649 | /* |
650 | * We've run out of DMA mapping space. Free up the |
651 | * buffer and return with what we've managed to put |
652 | * into the free list. We don't want to fail over to |
653 | * the small page allocation below in this case |
654 | * because DMA mapping resources are typically |
			 * critical resources once they become scarce.
656 | */ |
			__free_pages(page, s->fl_pg_order);
658 | goto out; |
659 | } |
660 | dma_addr |= RX_LARGE_BUF; |
661 | *d++ = cpu_to_be64(dma_addr); |
662 | |
663 | set_rx_sw_desc(sdesc, page, dma_addr); |
664 | sdesc++; |
665 | |
666 | fl->avail++; |
667 | if (++fl->pidx == fl->size) { |
668 | fl->pidx = 0; |
669 | sdesc = fl->sdesc; |
670 | d = fl->desc; |
671 | } |
672 | n--; |
673 | } |
674 | |
675 | alloc_small_pages: |
676 | while (n--) { |
		page = __dev_alloc_page(gfp);
678 | if (unlikely(!page)) { |
679 | fl->alloc_failed++; |
680 | break; |
681 | } |
682 | poison_buf(page, PAGE_SIZE); |
683 | |
684 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, |
685 | DMA_FROM_DEVICE); |
686 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { |
687 | put_page(page); |
688 | break; |
689 | } |
690 | *d++ = cpu_to_be64(dma_addr); |
691 | |
692 | set_rx_sw_desc(sdesc, page, dma_addr); |
693 | sdesc++; |
694 | |
695 | fl->avail++; |
696 | if (++fl->pidx == fl->size) { |
697 | fl->pidx = 0; |
698 | sdesc = fl->sdesc; |
699 | d = fl->desc; |
700 | } |
701 | } |
702 | |
703 | out: |
704 | /* |
705 | * Update our accounting state to incorporate the new Free List |
706 | * buffers, tell the hardware about them and return the number of |
707 | * buffers which we were able to allocate. |
708 | */ |
709 | cred = fl->avail - cred; |
710 | fl->pend_cred += cred; |
711 | ring_fl_db(adapter, fl); |
712 | |
713 | if (unlikely(fl_starving(adapter, fl))) { |
714 | smp_wmb(); |
		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
716 | } |
717 | |
718 | return cred; |
719 | } |
720 | |
721 | /* |
722 | * Refill a Free List to its capacity or the Maximum Refill Increment, |
723 | * whichever is smaller ... |
724 | */ |
725 | static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) |
726 | { |
727 | refill_fl(adapter, fl, |
728 | min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), |
729 | GFP_ATOMIC); |
730 | } |
731 | |
732 | /** |
733 | * alloc_ring - allocate resources for an SGE descriptor ring |
734 | * @dev: the PCI device's core device |
735 | * @nelem: the number of descriptors |
736 | * @hwsize: the size of each hardware descriptor |
737 | * @swsize: the size of each software descriptor |
738 | * @busaddrp: the physical PCI bus address of the allocated ring |
739 | * @swringp: return address pointer for software ring |
740 | * @stat_size: extra space in hardware ring for status information |
741 | * |
742 | * Allocates resources for an SGE descriptor ring, such as TX queues, |
743 | * free buffer lists, response queues, etc. Each SGE ring requires |
744 | * space for its hardware descriptors plus, optionally, space for software |
745 | * state associated with each hardware entry (the metadata). The function |
746 | * returns three values: the virtual address for the hardware ring (the |
747 | * return value of the function), the PCI bus address of the hardware |
748 | * ring (in *busaddrp), and the address of the software ring (in swringp). |
749 | * Both the hardware and software rings are returned zeroed out. |
750 | */ |
751 | static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, |
752 | size_t swsize, dma_addr_t *busaddrp, void *swringp, |
753 | size_t stat_size) |
754 | { |
755 | /* |
	 * Allocate the hardware ring and the PCI DMA bus address space for it.
757 | */ |
758 | size_t hwlen = nelem * hwsize + stat_size; |
	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760 | |
761 | if (!hwring) |
762 | return NULL; |
763 | |
764 | /* |
765 | * If the caller wants a software ring, allocate it and return a |
766 | * pointer to it in *swringp. |
767 | */ |
768 | BUG_ON((swsize != 0) != (swringp != NULL)); |
769 | if (swsize) { |
		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
771 | |
772 | if (!swring) { |
			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
774 | return NULL; |
775 | } |
776 | *(void **)swringp = swring; |
777 | } |
778 | |
779 | return hwring; |
780 | } |
781 | |
782 | /** |
783 | * sgl_len - calculates the size of an SGL of the given capacity |
784 | * @n: the number of SGL entries |
785 | * |
786 | * Calculates the number of flits (8-byte units) needed for a Direct |
787 | * Scatter/Gather List that can hold the given number of entries. |
788 | */ |
789 | static inline unsigned int sgl_len(unsigned int n) |
790 | { |
791 | /* |
792 | * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA |
793 | * addresses. The DSGL Work Request starts off with a 32-bit DSGL |
794 | * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, |
795 | * repeated sequences of { Length[i], Length[i+1], Address[i], |
796 | * Address[i+1] } (this ensures that all addresses are on 64-bit |
797 | * boundaries). If N is even, then Length[N+1] should be set to 0 and |
798 | * Address[N+1] is omitted. |
799 | * |
800 | * The following calculation incorporates all of the above. It's |
801 | * somewhat hard to follow but, briefly: the "+2" accounts for the |
802 | * first two flits which include the DSGL header, Length0 and |
803 | * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 |
 * flits for every pair of the remaining N); and
805 | * finally the "+((n-1)&1)" adds the one remaining flit needed if |
806 | * (n-1) is odd ... |
807 | */ |
808 | n--; |
809 | return (3 * n) / 2 + (n & 1) + 2; |
810 | } |
811 | |
812 | /** |
813 | * flits_to_desc - returns the num of TX descriptors for the given flits |
814 | * @flits: the number of flits |
815 | * |
816 | * Returns the number of TX descriptors needed for the supplied number |
817 | * of flits. |
818 | */ |
819 | static inline unsigned int flits_to_desc(unsigned int flits) |
820 | { |
821 | BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64)); |
822 | return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT); |
823 | } |
824 | |
825 | /** |
826 | * is_eth_imm - can an Ethernet packet be sent as immediate data? |
827 | * @skb: the packet |
828 | * |
829 | * Returns whether an Ethernet packet is small enough to fit completely as |
830 | * immediate data. |
831 | */ |
832 | static inline int is_eth_imm(const struct sk_buff *skb) |
833 | { |
834 | /* |
835 | * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request |
836 | * which does not accommodate immediate data. We could dike out all |
837 | * of the support code for immediate data but that would tie our hands |
	 * too much if we ever want to enhance the firmware. It would also
839 | * create more differences between the PF and VF Drivers. |
840 | */ |
841 | return false; |
842 | } |
843 | |
844 | /** |
845 | * calc_tx_flits - calculate the number of flits for a packet TX WR |
846 | * @skb: the packet |
847 | * |
848 | * Returns the number of flits needed for a TX Work Request for the |
849 | * given Ethernet packet, including the needed WR and CPL headers. |
850 | */ |
851 | static inline unsigned int calc_tx_flits(const struct sk_buff *skb) |
852 | { |
853 | unsigned int flits; |
854 | |
855 | /* |
856 | * If the skb is small enough, we can pump it out as a work request |
857 | * with only immediate data. In that case we just have to have the |
858 | * TX Packet header plus the skb data in the Work Request. |
859 | */ |
860 | if (is_eth_imm(skb)) |
861 | return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), |
862 | sizeof(__be64)); |
863 | |
864 | /* |
865 | * Otherwise, we're going to have to construct a Scatter gather list |
866 | * of the skb body and fragments. We also include the flits necessary |
867 | * for the TX Packet Work Request and CPL. We always have a firmware |
868 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and |
869 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL |
870 | * message or, if we're doing a Large Send Offload, an LSO CPL message |
871 | * with an embedded TX Packet Write CPL message. |
872 | */ |
873 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); |
874 | if (skb_shinfo(skb)->gso_size) |
875 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + |
876 | sizeof(struct cpl_tx_pkt_lso_core) + |
877 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); |
878 | else |
879 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + |
880 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); |
881 | return flits; |
882 | } |
883 | |
884 | /** |
885 | * write_sgl - populate a Scatter/Gather List for a packet |
886 | * @skb: the packet |
887 | * @tq: the TX queue we are writing into |
888 | * @sgl: starting location for writing the SGL |
889 | * @end: points right after the end of the SGL |
890 | * @start: start offset into skb main-body data to include in the SGL |
891 | * @addr: the list of DMA bus addresses for the SGL elements |
892 | * |
893 | * Generates a Scatter/Gather List for the buffers that make up a packet. |
894 | * The caller must provide adequate space for the SGL that will be written. |
895 | * The SGL includes all of the packet's page fragments and the data in its |
 * main body except for the first @start bytes. @sgl must be 16-byte
 * aligned and within a TX descriptor with available space. @end points
 * right after the end of the SGL but does not account for any potential
899 | * wrap around, i.e., @end > @tq->stat. |
900 | */ |
901 | static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, |
902 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, |
903 | const dma_addr_t *addr) |
904 | { |
905 | unsigned int i, len; |
906 | struct ulptx_sge_pair *to; |
907 | const struct skb_shared_info *si = skb_shinfo(skb); |
908 | unsigned int nfrags = si->nr_frags; |
909 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; |
910 | |
911 | len = skb_headlen(skb) - start; |
912 | if (likely(len)) { |
913 | sgl->len0 = htonl(len); |
914 | sgl->addr0 = cpu_to_be64(addr[0] + start); |
915 | nfrags++; |
916 | } else { |
917 | sgl->len0 = htonl(skb_frag_size(&si->frags[0])); |
918 | sgl->addr0 = cpu_to_be64(addr[1]); |
919 | } |
920 | |
921 | sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | |
922 | ULPTX_NSGE_V(nfrags)); |
923 | if (likely(--nfrags == 0)) |
924 | return; |
925 | /* |
926 | * Most of the complexity below deals with the possibility we hit the |
927 | * end of the queue in the middle of writing the SGL. For this case |
928 | * only we create the SGL in a temporary buffer and then copy it. |
929 | */ |
930 | to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; |
931 | |
932 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { |
933 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
934 | to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); |
935 | to->addr[0] = cpu_to_be64(addr[i]); |
936 | to->addr[1] = cpu_to_be64(addr[++i]); |
937 | } |
938 | if (nfrags) { |
939 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
940 | to->len[1] = cpu_to_be32(0); |
941 | to->addr[0] = cpu_to_be64(addr[i + 1]); |
942 | } |
943 | if (unlikely((u8 *)end > (u8 *)tq->stat)) { |
944 | unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; |
945 | |
946 | if (likely(part0)) |
947 | memcpy(sgl->sge, buf, part0); |
948 | part1 = (u8 *)end - (u8 *)tq->stat; |
949 | memcpy(tq->desc, (u8 *)buf + part0, part1); |
950 | end = (void *)tq->desc + part1; |
951 | } |
952 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ |
953 | *end = 0; |
954 | } |
955 | |
956 | /** |
957 | * ring_tx_db - check and potentially ring a TX queue's doorbell |
958 | * @adapter: the adapter |
959 | * @tq: the TX queue |
960 | * @n: number of new descriptors to give to HW |
961 | * |
 * Ring the doorbell for a TX queue.
963 | */ |
964 | static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, |
965 | int n) |
966 | { |
967 | /* Make sure that all writes to the TX Descriptors are committed |
968 | * before we tell the hardware about them. |
969 | */ |
970 | wmb(); |
971 | |
972 | /* If we don't have access to the new User Doorbell (T5+), use the old |
973 | * doorbell mechanism; otherwise use the new BAR2 mechanism. |
974 | */ |
975 | if (unlikely(tq->bar2_addr == NULL)) { |
976 | u32 val = PIDX_V(n); |
977 | |
978 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, |
979 | QID_V(tq->cntxt_id) | val); |
980 | } else { |
981 | u32 val = PIDX_T5_V(n); |
982 | |
983 | /* T4 and later chips share the same PIDX field offset within |
984 | * the doorbell, but T5 and later shrank the field in order to |
985 | * gain a bit for Doorbell Priority. The field was absurdly |
986 | * large in the first place (14 bits) so we just use the T5 |
987 | * and later limits and warn if a Queue ID is too large. |
988 | */ |
989 | WARN_ON(val & DBPRIO_F); |
990 | |
991 | /* If we're only writing a single Egress Unit and the BAR2 |
992 | * Queue ID is 0, we can use the Write Combining Doorbell |
993 | * Gather Buffer; otherwise we use the simple doorbell. |
994 | */ |
995 | if (n == 1 && tq->bar2_qid == 0) { |
996 | unsigned int index = (tq->pidx |
997 | ? (tq->pidx - 1) |
998 | : (tq->size - 1)); |
999 | __be64 *src = (__be64 *)&tq->desc[index]; |
1000 | __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + |
1001 | SGE_UDB_WCDOORBELL); |
1002 | unsigned int count = EQ_UNIT / sizeof(__be64); |
1003 | |
1004 | /* Copy the TX Descriptor in a tight loop in order to |
1005 | * try to get it to the adapter in a single Write |
1006 | * Combined transfer on the PCI-E Bus. If the Write |
1007 | * Combine fails (say because of an interrupt, etc.) |
1008 | * the hardware will simply take the last write as a |
1009 | * simple doorbell write with a PIDX Increment of 1 |
1010 | * and will fetch the TX Descriptor from memory via |
1011 | * DMA. |
1012 | */ |
1013 | while (count) { |
1014 | /* the (__force u64) is because the compiler |
1015 | * doesn't understand the endian swizzling |
1016 | * going on |
1017 | */ |
				writeq((__force u64)*src, dst);
1019 | src++; |
1020 | dst++; |
1021 | count--; |
1022 | } |
1023 | } else |
			writel(val | QID_V(tq->bar2_qid),
			       tq->bar2_addr + SGE_UDB_KDOORBELL);
1026 | |
1027 | /* This Write Memory Barrier will force the write to the User |
1028 | * Doorbell area to be flushed. This is needed to prevent |
1029 | * writes on different CPUs for the same queue from hitting |
1030 | * the adapter out of order. This is required when some Work |
1031 | * Requests take the Write Combine Gather Buffer path (user |
1032 | * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some |
1033 | * take the traditional path where we simply increment the |
1034 | * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the |
1035 | * hardware DMA read the actual Work Request. |
1036 | */ |
1037 | wmb(); |
1038 | } |
1039 | } |
1040 | |
1041 | /** |
1042 | * inline_tx_skb - inline a packet's data into TX descriptors |
1043 | * @skb: the packet |
1044 | * @tq: the TX queue where the packet will be inlined |
1045 | * @pos: starting position in the TX queue to inline the packet |
1046 | * |
1047 | * Inline a packet's contents directly into TX descriptors, starting at |
1048 | * the given position within the TX DMA ring. |
1049 | * Most of the complexity of this operation is dealing with wrap arounds |
1050 | * in the middle of the packet we want to inline. |
1051 | */ |
1052 | static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, |
1053 | void *pos) |
1054 | { |
1055 | u64 *p; |
1056 | int left = (void *)tq->stat - pos; |
1057 | |
1058 | if (likely(skb->len <= left)) { |
1059 | if (likely(!skb->data_len)) |
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
1063 | pos += skb->len; |
1064 | } else { |
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067 | pos = (void *)tq->desc + (skb->len - left); |
1068 | } |
1069 | |
1070 | /* 0-pad to multiple of 16 */ |
1071 | p = PTR_ALIGN(pos, 8); |
1072 | if ((uintptr_t)p & 8) |
1073 | *p = 0; |
1074 | } |
1075 | |
1076 | /* |
1077 | * Figure out what HW csum a packet wants and return the appropriate control |
1078 | * bits. |
1079 | */ |
1080 | static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) |
1081 | { |
1082 | int csum_type; |
1083 | const struct iphdr *iph = ip_hdr(skb); |
1084 | |
1085 | if (iph->version == 4) { |
1086 | if (iph->protocol == IPPROTO_TCP) |
1087 | csum_type = TX_CSUM_TCPIP; |
1088 | else if (iph->protocol == IPPROTO_UDP) |
1089 | csum_type = TX_CSUM_UDPIP; |
1090 | else { |
1091 | nocsum: |
1092 | /* |
1093 | * unknown protocol, disable HW csum |
1094 | * and hope a bad packet is detected |
1095 | */ |
1096 | return TXPKT_L4CSUM_DIS_F; |
1097 | } |
1098 | } else { |
1099 | /* |
1100 | * this doesn't work with extension headers |
1101 | */ |
1102 | const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; |
1103 | |
1104 | if (ip6h->nexthdr == IPPROTO_TCP) |
1105 | csum_type = TX_CSUM_TCPIP6; |
1106 | else if (ip6h->nexthdr == IPPROTO_UDP) |
1107 | csum_type = TX_CSUM_UDPIP6; |
1108 | else |
1109 | goto nocsum; |
1110 | } |
1111 | |
1112 | if (likely(csum_type >= TX_CSUM_TCPIP)) { |
1113 | u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); |
1114 | int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; |
1115 | |
1116 | if (chip <= CHELSIO_T5) |
1117 | hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); |
1118 | else |
1119 | hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); |
1120 | return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; |
1121 | } else { |
1122 | int start = skb_transport_offset(skb); |
1123 | |
1124 | return TXPKT_CSUM_TYPE_V(csum_type) | |
1125 | TXPKT_CSUM_START_V(start) | |
1126 | TXPKT_CSUM_LOC_V(start + skb->csum_offset); |
1127 | } |
1128 | } |
1129 | |
1130 | /* |
1131 | * Stop an Ethernet TX queue and record that state change. |
1132 | */ |
1133 | static void txq_stop(struct sge_eth_txq *txq) |
1134 | { |
	netif_tx_stop_queue(txq->txq);
1136 | txq->q.stops++; |
1137 | } |
1138 | |
1139 | /* |
1140 | * Advance our software state for a TX queue by adding n in use descriptors. |
1141 | */ |
1142 | static inline void txq_advance(struct sge_txq *tq, unsigned int n) |
1143 | { |
1144 | tq->in_use += n; |
1145 | tq->pidx += n; |
1146 | if (tq->pidx >= tq->size) |
1147 | tq->pidx -= tq->size; |
1148 | } |
1149 | |
1150 | /** |
1151 | * t4vf_eth_xmit - add a packet to an Ethernet TX queue |
1152 | * @skb: the packet |
1153 | * @dev: the egress net device |
1154 | * |
1155 | * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. |
1156 | */ |
1157 | netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) |
1158 | { |
1159 | u32 wr_mid; |
1160 | u64 cntrl, *end; |
1161 | int qidx, credits, max_pkt_len; |
1162 | unsigned int flits, ndesc; |
1163 | struct adapter *adapter; |
1164 | struct sge_eth_txq *txq; |
1165 | const struct port_info *pi; |
1166 | struct fw_eth_tx_pkt_vm_wr *wr; |
1167 | struct cpl_tx_pkt_core *cpl; |
1168 | const struct skb_shared_info *ssi; |
1169 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; |
1170 | const size_t fw_hdr_copy_len = sizeof(wr->firmware); |
1171 | |
1172 | /* |
1173 | * The chip minimum packet length is 10 octets but the firmware |
1174 | * command that we are using requires that we copy the Ethernet header |
1175 | * (including the VLAN tag) into the header so we reject anything |
1176 | * smaller than that ... |
1177 | */ |
1178 | if (unlikely(skb->len < fw_hdr_copy_len)) |
1179 | goto out_free; |
1180 | |
1181 | /* Discard the packet if the length is greater than mtu */ |
1182 | max_pkt_len = ETH_HLEN + dev->mtu; |
1183 | if (skb_vlan_tagged(skb)) |
1184 | max_pkt_len += VLAN_HLEN; |
1185 | if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) |
1186 | goto out_free; |
1187 | |
1188 | /* |
1189 | * Figure out which TX Queue we're going to use. |
1190 | */ |
1191 | pi = netdev_priv(dev); |
1192 | adapter = pi->adapter; |
1193 | qidx = skb_get_queue_mapping(skb); |
1194 | BUG_ON(qidx >= pi->nqsets); |
1195 | txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; |
1196 | |
1197 | if (pi->vlan_id && !skb_vlan_tag_present(skb)) |
		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
				       pi->vlan_id);
1200 | |
1201 | /* |
1202 | * Take this opportunity to reclaim any TX Descriptors whose DMA |
1203 | * transfers have completed. |
1204 | */ |
	reclaim_completed_tx(adapter, &txq->q, true);
1206 | |
1207 | /* |
1208 | * Calculate the number of flits and TX Descriptors we're going to |
1209 | * need along with how many TX Descriptors will be left over after |
1210 | * we inject our Work Request. |
1211 | */ |
1212 | flits = calc_tx_flits(skb); |
1213 | ndesc = flits_to_desc(flits); |
	credits = txq_avail(&txq->q) - ndesc;
1215 | |
1216 | if (unlikely(credits < 0)) { |
1217 | /* |
1218 | * Not enough room for this packet's Work Request. Stop the |
1219 | * TX Queue and return a "busy" condition. The queue will get |
1220 | * started later on when the firmware informs us that space |
1221 | * has opened up. |
1222 | */ |
1223 | txq_stop(txq); |
1224 | dev_err(adapter->pdev_dev, |
1225 | "%s: TX ring %u full while queue awake!\n" , |
1226 | dev->name, qidx); |
1227 | return NETDEV_TX_BUSY; |
1228 | } |
1229 | |
1230 | if (!is_eth_imm(skb) && |
1231 | unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { |
1232 | /* |
1233 | * We need to map the skb into PCI DMA space (because it can't |
1234 | * be in-lined directly into the Work Request) and the mapping |
1235 | * operation failed. Record the error and drop the packet. |
1236 | */ |
1237 | txq->mapping_err++; |
1238 | goto out_free; |
1239 | } |
1240 | |
1241 | wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); |
1242 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1243 | /* |
1244 | * After we're done injecting the Work Request for this |
1245 | * packet, we'll be below our "stop threshold" so stop the TX |
1246 | * Queue now and schedule a request for an SGE Egress Queue |
1247 | * Update message. The queue will get started later on when |
1248 | * the firmware processes this Work Request and sends us an |
1249 | * Egress Queue Status Update message indicating that space |
1250 | * has opened up. |
1251 | */ |
1252 | txq_stop(txq); |
1253 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; |
1254 | } |
1255 | |
1256 | /* |
1257 | * Start filling in our Work Request. Note that we do _not_ handle |
1258 | * the WR Header wrapping around the TX Descriptor Ring. If our |
1259 | * maximum header size ever exceeds one TX Descriptor, we'll need to |
1260 | * do something else here. |
1261 | */ |
1262 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); |
1263 | wr = (void *)&txq->q.desc[txq->q.pidx]; |
1264 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); |
1265 | wr->r3[0] = cpu_to_be32(0); |
1266 | wr->r3[1] = cpu_to_be32(0); |
	skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
1268 | end = (u64 *)wr + flits; |
1269 | |
1270 | /* |
1271 | * If this is a Large Send Offload packet we'll put in an LSO CPL |
1272 | * message with an encapsulated TX Packet CPL message. Otherwise we |
1273 | * just use a TX Packet CPL message. |
1274 | */ |
1275 | ssi = skb_shinfo(skb); |
1276 | if (ssi->gso_size) { |
1277 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); |
1278 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; |
1279 | int l3hdr_len = skb_network_header_len(skb); |
1280 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; |
1281 | |
1282 | wr->op_immdlen = |
1283 | cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | |
1284 | FW_WR_IMMDLEN_V(sizeof(*lso) + |
1285 | sizeof(*cpl))); |
1286 | /* |
1287 | * Fill in the LSO CPL message. |
1288 | */ |
1289 | lso->lso_ctrl = |
1290 | cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | |
1291 | LSO_FIRST_SLICE_F | |
1292 | LSO_LAST_SLICE_F | |
1293 | LSO_IPV6_V(v6) | |
1294 | LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | |
1295 | LSO_IPHDR_LEN_V(l3hdr_len / 4) | |
1296 | LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); |
1297 | lso->ipid_ofst = cpu_to_be16(0); |
1298 | lso->mss = cpu_to_be16(ssi->gso_size); |
1299 | lso->seqno_offset = cpu_to_be32(0); |
		if (is_t4(adapter->params.chip))
1301 | lso->len = cpu_to_be32(skb->len); |
1302 | else |
1303 | lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); |
1304 | |
1305 | /* |
1306 | * Set up TX Packet CPL pointer, control word and perform |
1307 | * accounting. |
1308 | */ |
1309 | cpl = (void *)(lso + 1); |
1310 | |
1311 | if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) |
1312 | cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); |
1313 | else |
1314 | cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); |
1315 | |
1316 | cntrl |= TXPKT_CSUM_TYPE_V(v6 ? |
1317 | TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | |
1318 | TXPKT_IPHDR_LEN_V(l3hdr_len); |
1319 | txq->tso++; |
1320 | txq->tx_cso += ssi->gso_segs; |
1321 | } else { |
1322 | int len; |
1323 | |
1324 | len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); |
1325 | wr->op_immdlen = |
1326 | cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | |
1327 | FW_WR_IMMDLEN_V(len)); |
1328 | |
1329 | /* |
1330 | * Set up TX Packet CPL pointer, control word and perform |
1331 | * accounting. |
1332 | */ |
1333 | cpl = (void *)(wr + 1); |
1334 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
			cntrl = hwcsum(adapter->params.chip, skb) |
1336 | TXPKT_IPCSUM_DIS_F; |
1337 | txq->tx_cso++; |
1338 | } else |
1339 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; |
1340 | } |
1341 | |
1342 | /* |
1343 | * If there's a VLAN tag present, add that to the list of things to |
1344 | * do in this Work Request. |
1345 | */ |
1346 | if (skb_vlan_tag_present(skb)) { |
1347 | txq->vlan_ins++; |
1348 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); |
1349 | } |
1350 | |
1351 | /* |
1352 | * Fill in the TX Packet CPL message header. |
1353 | */ |
1354 | cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | |
1355 | TXPKT_INTF_V(pi->port_id) | |
1356 | TXPKT_PF_V(0)); |
1357 | cpl->pack = cpu_to_be16(0); |
1358 | cpl->len = cpu_to_be16(skb->len); |
1359 | cpl->ctrl1 = cpu_to_be64(cntrl); |
1360 | |
1361 | #ifdef T4_TRACE |
1362 | T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], |
1363 | "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u" , |
1364 | ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); |
1365 | #endif |
1366 | |
1367 | /* |
1368 | * Fill in the body of the TX Packet CPL message with either in-lined |
1369 | * data or a Scatter/Gather List. |
1370 | */ |
1371 | if (is_eth_imm(skb)) { |
1372 | /* |
1373 | * In-line the packet's data and free the skb since we don't |
1374 | * need it any longer. |
1375 | */ |
		inline_tx_skb(skb, &txq->q, cpl + 1);
1377 | dev_consume_skb_any(skb); |
1378 | } else { |
1379 | /* |
1380 | * Write the skb's Scatter/Gather list into the TX Packet CPL |
1381 | * message and retain a pointer to the skb so we can free it |
1382 | * later when its DMA completes. (We store the skb pointer |
1383 | * in the Software Descriptor corresponding to the last TX |
1384 | * Descriptor used by the Work Request.) |
1385 | * |
1386 | * The retained skb will be freed when the corresponding TX |
1387 | * Descriptors are reclaimed after their DMAs complete. |
1388 | * However, this could take quite a while since, in general, |
1389 | * the hardware is set up to be lazy about sending DMA |
1390 | * completion notifications to us and we mostly perform TX |
1391 | * reclaims in the transmit routine. |
1392 | * |
		 * This is good for performance but means that we rely on new
1394 | * TX packets arriving to run the destructors of completed |
1395 | * packets, which open up space in their sockets' send queues. |
1396 | * Sometimes we do not get such new packets causing TX to |
1397 | * stall. A single UDP transmitter is a good example of this |
1398 | * situation. We have a clean up timer that periodically |
1399 | * reclaims completed packets but it doesn't run often enough |
1400 | * (nor do we want it to) to prevent lengthy stalls. A |
1401 | * solution to this problem is to run the destructor early, |
1402 | * after the packet is queued but before it's DMAd. A con is |
1403 | * that we lie to socket memory accounting, but the amount of |
1404 | * extra memory is reasonable (limited by the number of TX |
1405 | * descriptors), the packets do actually get freed quickly by |
1406 | * new packets almost always, and for protocols like TCP that |
1407 | * wait for acks to really free up the data the extra memory |
1408 | * is even less. On the positive side we run the destructors |
1409 | * on the sending CPU rather than on a potentially different |
1410 | * completing CPU, usually a good thing. |
1411 | * |
1412 | * Run the destructor before telling the DMA engine about the |
1413 | * packet to make sure it doesn't complete and get freed |
1414 | * prematurely. |
1415 | */ |
1416 | struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); |
1417 | struct sge_txq *tq = &txq->q; |
1418 | int last_desc; |
1419 | |
1420 | /* |
1421 | * If the Work Request header was an exact multiple of our TX |
1422 | * Descriptor length, then it's possible that the starting SGL |
1423 | * pointer lines up exactly with the end of our TX Descriptor |
1424 | * ring. If that's the case, wrap around to the beginning |
1425 | * here ... |
1426 | */ |
1427 | if (unlikely((void *)sgl == (void *)tq->stat)) { |
1428 | sgl = (void *)tq->desc; |
1429 | end = ((void *)tq->desc + ((void *)end - (void *)tq->stat)); |
1430 | } |
1431 | |
		write_sgl(skb, tq, sgl, end, 0, addr);
1433 | skb_orphan(skb); |
1434 | |
1435 | last_desc = tq->pidx + ndesc - 1; |
1436 | if (last_desc >= tq->size) |
1437 | last_desc -= tq->size; |
1438 | tq->sdesc[last_desc].skb = skb; |
1439 | tq->sdesc[last_desc].sgl = sgl; |
1440 | } |
1441 | |
1442 | /* |
1443 | * Advance our internal TX Queue state, tell the hardware about |
1444 | * the new TX descriptors and return success. |
1445 | */ |
	txq_advance(&txq->q, ndesc);
	netif_trans_update(dev);
	ring_tx_db(adapter, &txq->q, ndesc);
1449 | return NETDEV_TX_OK; |
1450 | |
1451 | out_free: |
1452 | /* |
1453 | * An error of some sort happened. Free the TX skb and tell the |
1454 | * OS that we've "dealt" with the packet ... |
1455 | */ |
1456 | dev_kfree_skb_any(skb); |
1457 | return NETDEV_TX_OK; |
1458 | } |
1459 | |
1460 | /** |
1461 | * copy_frags - copy fragments from gather list into skb_shared_info |
1462 | * @skb: destination skb |
1463 | * @gl: source internal packet gather list |
1464 | * @offset: packet start offset in first page |
1465 | * |
1466 | * Copy an internal packet gather list into a Linux skb_shared_info |
1467 | * structure. |
1468 | */ |
1469 | static inline void copy_frags(struct sk_buff *skb, |
1470 | const struct pkt_gl *gl, |
1471 | unsigned int offset) |
1472 | { |
1473 | int i; |
1474 | |
1475 | /* usually there's just one frag */ |
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
1479 | skb_shinfo(skb)->nr_frags = gl->nfrags; |
1480 | for (i = 1; i < gl->nfrags; i++) |
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);
1484 | |
1485 | /* get a reference to the last page, we don't own it */ |
	get_page(gl->frags[gl->nfrags - 1].page);
1487 | } |
1488 | |
1489 | /** |
1490 | * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list |
1491 | * @gl: the gather list |
1492 | * @skb_len: size of sk_buff main body if it carries fragments |
1493 | * @pull_len: amount of data to move to the sk_buff's main body |
1494 | * |
1495 | * Builds an sk_buff from the given packet gather list. Returns the |
1496 | * sk_buff or %NULL if sk_buff allocation failed. |
1497 | */ |
1498 | static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, |
1499 | unsigned int skb_len, |
1500 | unsigned int pull_len) |
1501 | { |
1502 | struct sk_buff *skb; |
1503 | |
1504 | /* |
1505 | * If the ingress packet is small enough, allocate an skb large enough |
1506 | * for all of the data and copy it inline. Otherwise, allocate an skb |
1507 | * with enough room to pull in the header and reference the rest of |
1508 | * the data via the skb fragment list. |
1509 | * |
1510 | * Below we rely on RX_COPY_THRES being less than the smallest Rx |
* buffer size, which is expected since buffers are at least
* PAGE_SIZE in size. In this case packets up to RX_COPY_THRES have only one
1513 | * fragment. |
1514 | */ |
1515 | if (gl->tot_len <= RX_COPY_THRES) { |
1516 | /* small packets have only one fragment */ |
skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
if (unlikely(!skb))
goto out;
__skb_put(skb, gl->tot_len);
skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
} else {
skb = alloc_skb(skb_len, GFP_ATOMIC);
if (unlikely(!skb))
goto out;
__skb_put(skb, pull_len);
skb_copy_to_linear_data(skb, gl->va, pull_len);

copy_frags(skb, gl, pull_len);
1530 | skb->len = gl->tot_len; |
1531 | skb->data_len = skb->len - pull_len; |
1532 | skb->truesize += skb->data_len; |
1533 | } |
1534 | |
1535 | out: |
1536 | return skb; |
1537 | } |
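
/*
 * Worked example (illustrative, assuming the conventional values
 * RX_COPY_THRES = 256 and RX_PULL_LEN = 128 from this file's constants):
 * a 200-byte ingress packet is copied wholesale into a 200-byte linear
 * skb, while a 1500-byte packet gets a 128-byte linear header area with
 * the remaining 1372 bytes referenced in place as page fragments.
 */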
1538 | |
1539 | /** |
1540 | * t4vf_pktgl_free - free a packet gather list |
1541 | * @gl: the gather list |
1542 | * |
1543 | * Releases the pages of a packet gather list. We do not own the last |
1544 | * page on the list and do not free it. |
1545 | */ |
1546 | static void t4vf_pktgl_free(const struct pkt_gl *gl) |
1547 | { |
1548 | int frag; |
1549 | |
1550 | frag = gl->nfrags - 1; |
1551 | while (frag--) |
put_page(gl->frags[frag].page);
1553 | } |
1554 | |
1555 | /** |
1556 | * do_gro - perform Generic Receive Offload ingress packet processing |
1557 | * @rxq: ingress RX Ethernet Queue |
1558 | * @gl: gather list for ingress packet |
1559 | * @pkt: CPL header for last packet fragment |
1560 | * |
1561 | * Perform Generic Receive Offload (GRO) ingress packet processing. |
1562 | * We use the standard Linux GRO interfaces for this. |
1563 | */ |
1564 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, |
1565 | const struct cpl_rx_pkt *pkt) |
1566 | { |
1567 | struct adapter *adapter = rxq->rspq.adapter; |
1568 | struct sge *s = &adapter->sge; |
1569 | struct port_info *pi; |
1570 | int ret; |
1571 | struct sk_buff *skb; |
1572 | |
skb = napi_get_frags(&rxq->rspq.napi);
1574 | if (unlikely(!skb)) { |
1575 | t4vf_pktgl_free(gl); |
1576 | rxq->stats.rx_drops++; |
1577 | return; |
1578 | } |
1579 | |
copy_frags(skb, gl, s->pktshift);
skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
pi = netdev_priv(skb->dev);
1587 | |
1588 | if (pkt->vlan_ex && !pi->vlan_id) { |
1589 | __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q), |
1590 | be16_to_cpu(pkt->vlan)); |
1591 | rxq->stats.vlan_ex++; |
1592 | } |
ret = napi_gro_frags(&rxq->rspq.napi);
1594 | |
1595 | if (ret == GRO_HELD) |
1596 | rxq->stats.lro_pkts++; |
1597 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) |
1598 | rxq->stats.lro_merged++; |
1599 | rxq->stats.pkts++; |
1600 | rxq->stats.rx_cso++; |
1601 | } |
1602 | |
1603 | /** |
1604 | * t4vf_ethrx_handler - process an ingress ethernet packet |
1605 | * @rspq: the response queue that received the packet |
1606 | * @rsp: the response queue descriptor holding the RX_PKT message |
1607 | * @gl: the gather list of packet fragments |
1608 | * |
1609 | * Process an ingress ethernet packet and deliver it to the stack. |
1610 | */ |
1611 | int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, |
1612 | const struct pkt_gl *gl) |
1613 | { |
1614 | struct sk_buff *skb; |
1615 | const struct cpl_rx_pkt *pkt = (void *)rsp; |
1616 | bool csum_ok = pkt->csum_calc && !pkt->err_vec && |
1617 | (rspq->netdev->features & NETIF_F_RXCSUM); |
1618 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1619 | struct adapter *adapter = rspq->adapter; |
1620 | struct sge *s = &adapter->sge; |
1621 | struct port_info *pi; |
1622 | |
1623 | /* |
1624 | * If this is a good TCP packet and we have Generic Receive Offload |
1625 | * enabled, handle the packet in the GRO path. |
1626 | */ |
1627 | if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) && |
1628 | (rspq->netdev->features & NETIF_F_GRO) && csum_ok && |
1629 | !pkt->ip_frag) { |
1630 | do_gro(rxq, gl, pkt); |
1631 | return 0; |
1632 | } |
1633 | |
1634 | /* |
1635 | * Convert the Packet Gather List into an skb. |
1636 | */ |
skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1638 | if (unlikely(!skb)) { |
1639 | t4vf_pktgl_free(gl); |
1640 | rxq->stats.rx_drops++; |
1641 | return 0; |
1642 | } |
__skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
pi = netdev_priv(skb->dev);
1647 | rxq->stats.pkts++; |
1648 | |
1649 | if (csum_ok && !pkt->err_vec && |
1650 | (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) { |
1651 | if (!pkt->ip_frag) { |
1652 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1653 | rxq->stats.rx_cso++; |
1654 | } else if (pkt->l2info & htonl(RXF_IP_F)) { |
1655 | __sum16 c = (__force __sum16)pkt->csum; |
skb->csum = csum_unfold(c);
1657 | skb->ip_summed = CHECKSUM_COMPLETE; |
1658 | rxq->stats.rx_cso++; |
1659 | } |
1660 | } else |
1661 | skb_checksum_none_assert(skb); |
1662 | |
1663 | if (pkt->vlan_ex && !pi->vlan_id) { |
1664 | rxq->stats.vlan_ex++; |
1665 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
1666 | be16_to_cpu(pkt->vlan)); |
1667 | } |
1668 | |
1669 | netif_receive_skb(skb); |
1670 | |
1671 | return 0; |
1672 | } |
1673 | |
1674 | /** |
1675 | * is_new_response - check if a response is newly written |
1676 | * @rc: the response control descriptor |
1677 | * @rspq: the response queue |
1678 | * |
1679 | * Returns true if a response descriptor contains a yet unprocessed |
1680 | * response. |
1681 | */ |
1682 | static inline bool is_new_response(const struct rsp_ctrl *rc, |
1683 | const struct sge_rspq *rspq) |
1684 | { |
1685 | return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen; |
1686 | } |
1687 | |
1688 | /** |
1689 | * restore_rx_bufs - put back a packet's RX buffers |
1690 | * @gl: the packet gather list |
1691 | * @fl: the SGE Free List |
* @frags: how many fragments in @gl
*
* Called when we find out that the current packet, @gl, can't be
1695 | * processed right away for some reason. This is a very rare event and |
1696 | * there's no effort to make this suspension/resumption process |
1697 | * particularly efficient. |
1698 | * |
1699 | * We implement the suspension by putting all of the RX buffers associated |
1700 | * with the current packet back on the original Free List. The buffers |
* have already been unmapped and are left unmapped; we mark them as
* unmapped in order to prevent further unmapping attempts. (Effectively
1703 | * this function undoes the series of @unmap_rx_buf calls which were done |
1704 | * to create the current packet's gather list.) This leaves us ready to |
1705 | * restart processing of the packet the next time we start processing the |
1706 | * RX Queue ... |
1707 | */ |
1708 | static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, |
1709 | int frags) |
1710 | { |
1711 | struct rx_sw_desc *sdesc; |
1712 | |
1713 | while (frags--) { |
1714 | if (fl->cidx == 0) |
1715 | fl->cidx = fl->size - 1; |
1716 | else |
1717 | fl->cidx--; |
1718 | sdesc = &fl->sdesc[fl->cidx]; |
1719 | sdesc->page = gl->frags[frags].page; |
1720 | sdesc->dma_addr |= RX_UNMAPPED_BUF; |
1721 | fl->avail++; |
1722 | } |
1723 | } |
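
/*
 * Example of the consumer index rewind above (illustrative): with
 * fl->size = 1024 and fl->cidx = 1, restoring three fragments steps
 * cidx through 0, 1023 and 1022; the index wraps backwards around the
 * ring exactly as the earlier unmap_rx_buf() calls advanced it
 * forwards.
 */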
1724 | |
1725 | /** |
1726 | * rspq_next - advance to the next entry in a response queue |
1727 | * @rspq: the queue |
1728 | * |
1729 | * Updates the state of a response queue to advance it to the next entry. |
1730 | */ |
1731 | static inline void rspq_next(struct sge_rspq *rspq) |
1732 | { |
1733 | rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; |
1734 | if (unlikely(++rspq->cidx == rspq->size)) { |
1735 | rspq->cidx = 0; |
1736 | rspq->gen ^= 1; |
1737 | rspq->cur_desc = rspq->desc; |
1738 | } |
1739 | } |
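
/*
 * Background on the generation bit used by is_new_response() and
 * rspq_next() above: the queue starts with gen = 1 and the hardware
 * stamps each response it writes with the current generation. Every
 * time our consumer index wraps from size-1 back to 0 we flip gen, so
 * stale descriptors left over from the previous pass through the ring
 * carry the old generation value and are ignored until the hardware
 * overwrites them.
 */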
1740 | |
1741 | /** |
1742 | * process_responses - process responses from an SGE response queue |
1743 | * @rspq: the ingress response queue to process |
1744 | * @budget: how many responses can be processed in this round |
1745 | * |
1746 | * Process responses from a Scatter Gather Engine response queue up to |
1747 | * the supplied budget. Responses include received packets as well as |
1748 | * control messages from firmware or hardware. |
1749 | * |
1750 | * Additionally choose the interrupt holdoff time for the next interrupt |
1751 | * on this queue. If the system is under memory shortage use a fairly |
1752 | * long delay to help recovery. |
1753 | */ |
1754 | static int process_responses(struct sge_rspq *rspq, int budget) |
1755 | { |
1756 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1757 | struct adapter *adapter = rspq->adapter; |
1758 | struct sge *s = &adapter->sge; |
1759 | int budget_left = budget; |
1760 | |
1761 | while (likely(budget_left)) { |
1762 | int ret, rsp_type; |
1763 | const struct rsp_ctrl *rc; |
1764 | |
1765 | rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); |
1766 | if (!is_new_response(rc, rspq)) |
1767 | break; |
1768 | |
1769 | /* |
1770 | * Figure out what kind of response we've received from the |
1771 | * SGE. |
1772 | */ |
1773 | dma_rmb(); |
1774 | rsp_type = RSPD_TYPE_G(rc->type_gen); |
1775 | if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { |
1776 | struct page_frag *fp; |
1777 | struct pkt_gl gl; |
1778 | const struct rx_sw_desc *sdesc; |
1779 | u32 bufsz, frag; |
1780 | u32 len = be32_to_cpu(rc->pldbuflen_qid); |
1781 | |
1782 | /* |
1783 | * If we get a "new buffer" message from the SGE we |
1784 | * need to move on to the next Free List buffer. |
1785 | */ |
1786 | if (len & RSPD_NEWBUF_F) { |
1787 | /* |
1788 | * We get one "new buffer" message when we |
1789 | * first start up a queue so we need to ignore |
1790 | * it when our offset into the buffer is 0. |
1791 | */ |
1792 | if (likely(rspq->offset > 0)) { |
free_rx_bufs(rspq->adapter, &rxq->fl, 1);
1795 | rspq->offset = 0; |
1796 | } |
1797 | len = RSPD_LEN_G(len); |
1798 | } |
1799 | gl.tot_len = len; |
1800 | |
1801 | /* |
1802 | * Gather packet fragments. |
1803 | */ |
1804 | for (frag = 0, fp = gl.frags; /**/; frag++, fp++) { |
1805 | BUG_ON(frag >= MAX_SKB_FRAGS); |
1806 | BUG_ON(rxq->fl.avail == 0); |
1807 | sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; |
1808 | bufsz = get_buf_size(adapter, sdesc); |
1809 | fp->page = sdesc->page; |
1810 | fp->offset = rspq->offset; |
1811 | fp->size = min(bufsz, len); |
1812 | len -= fp->size; |
1813 | if (!len) |
1814 | break; |
unmap_rx_buf(rspq->adapter, &rxq->fl);
1816 | } |
1817 | gl.nfrags = frag+1; |
1818 | |
1819 | /* |
1820 | * Last buffer remains mapped so explicitly make it |
1821 | * coherent for CPU access and start preloading first |
1822 | * cache line ... |
1823 | */ |
dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
get_buf_addr(sdesc),
fp->size, DMA_FROM_DEVICE);
1827 | gl.va = (page_address(gl.frags[0].page) + |
1828 | gl.frags[0].offset); |
1829 | prefetch(gl.va); |
1830 | |
1831 | /* |
1832 | * Hand the new ingress packet to the handler for |
1833 | * this Response Queue. |
1834 | */ |
1835 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); |
1836 | if (likely(ret == 0)) |
1837 | rspq->offset += ALIGN(fp->size, s->fl_align); |
1838 | else |
restore_rx_bufs(&gl, &rxq->fl, frag);
1840 | } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { |
1841 | ret = rspq->handler(rspq, rspq->cur_desc, NULL); |
1842 | } else { |
1843 | WARN_ON(rsp_type > RSPD_TYPE_CPL_X); |
1844 | ret = 0; |
1845 | } |
1846 | |
1847 | if (unlikely(ret)) { |
1848 | /* |
1849 | * Couldn't process descriptor, back off for recovery. |
1850 | * We use the SGE's last timer which has the longest |
1851 | * interrupt coalescing value ... |
1852 | */ |
1853 | const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; |
1854 | rspq->next_intr_params = |
1855 | QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX); |
1856 | break; |
1857 | } |
1858 | |
1859 | rspq_next(rspq); |
1860 | budget_left--; |
1861 | } |
1862 | |
1863 | /* |
1864 | * If this is a Response Queue with an associated Free List and |
1865 | * at least two Egress Queue units available in the Free List |
1866 | * for new buffer pointers, refill the Free List. |
1867 | */ |
1868 | if (rspq->offset >= 0 && |
fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
__refill_fl(rspq->adapter, &rxq->fl);
1871 | return budget - budget_left; |
1872 | } |
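
/*
 * Note on rspq->offset above (illustrative, assuming an fl_align of 64
 * bytes): after delivering a 1500-byte packet the offset advances by
 * ALIGN(1500, 64) = 1536, so the next packet can land in the same Free
 * List page at the following 64-byte boundary instead of consuming a
 * fresh buffer; the RSPD_NEWBUF_F flag tells us when the SGE has in
 * fact moved on to the next Free List entry.
 */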
1873 | |
1874 | /** |
1875 | * napi_rx_handler - the NAPI handler for RX processing |
1876 | * @napi: the napi instance |
1877 | * @budget: how many packets we can process in this round |
1878 | * |
1879 | * Handler for new data events when using NAPI. This does not need any |
1880 | * locking or protection from interrupts as data interrupts are off at |
1881 | * this point and other adapter interrupts do not interfere (the latter |
* is not a concern at all with MSI-X, as non-data interrupts then have
1883 | * a separate handler). |
1884 | */ |
1885 | static int napi_rx_handler(struct napi_struct *napi, int budget) |
1886 | { |
1887 | unsigned int intr_params; |
1888 | struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); |
1889 | int work_done = process_responses(rspq, budget); |
1890 | u32 val; |
1891 | |
1892 | if (likely(work_done < budget)) { |
napi_complete_done(napi, work_done);
1894 | intr_params = rspq->next_intr_params; |
1895 | rspq->next_intr_params = rspq->intr_params; |
1896 | } else |
1897 | intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX); |
1898 | |
1899 | if (unlikely(work_done == 0)) |
1900 | rspq->unhandled_irqs++; |
1901 | |
1902 | val = CIDXINC_V(work_done) | SEINTARM_V(intr_params); |
1903 | /* If we don't have access to the new User GTS (T5+), use the old |
1904 | * doorbell mechanism; otherwise use the new BAR2 mechanism. |
1905 | */ |
1906 | if (unlikely(!rspq->bar2_addr)) { |
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V((u32)rspq->cntxt_id));
} else {
writel(val | INGRESSQID_V(rspq->bar2_qid),
rspq->bar2_addr + SGE_UDB_GTS);
1913 | wmb(); |
1914 | } |
1915 | return work_done; |
1916 | } |
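
/*
 * For example (illustrative): a poll round that completed 5 responses
 * and wants holdoff timer index 3 for the next interrupt composes
 *
 *   CIDXINC_V(5) | SEINTARM_V(QINTR_TIMER_IDX_V(3)) | INGRESSQID_V(id)
 *
 * and writes it to the Gather/Timer/Status doorbell, advancing the
 * hardware's view of our consumer index and re-arming the interrupt in
 * a single register write.
 */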
1917 | |
1918 | /* |
1919 | * The MSI-X interrupt handler for an SGE response queue for the NAPI case |
1920 | * (i.e., response queue serviced by NAPI polling). |
1921 | */ |
1922 | irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie) |
1923 | { |
1924 | struct sge_rspq *rspq = cookie; |
1925 | |
napi_schedule(&rspq->napi);
1927 | return IRQ_HANDLED; |
1928 | } |
1929 | |
1930 | /* |
1931 | * Process the indirect interrupt entries in the interrupt queue and kick off |
1932 | * NAPI for each queue that has generated an entry. |
1933 | */ |
1934 | static unsigned int process_intrq(struct adapter *adapter) |
1935 | { |
1936 | struct sge *s = &adapter->sge; |
1937 | struct sge_rspq *intrq = &s->intrq; |
1938 | unsigned int work_done; |
1939 | u32 val; |
1940 | |
spin_lock(&adapter->sge.intrq_lock);
1942 | for (work_done = 0; ; work_done++) { |
1943 | const struct rsp_ctrl *rc; |
1944 | unsigned int qid, iq_idx; |
1945 | struct sge_rspq *rspq; |
1946 | |
1947 | /* |
1948 | * Grab the next response from the interrupt queue and bail |
1949 | * out if it's not a new response. |
1950 | */ |
1951 | rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); |
if (!is_new_response(rc, intrq))
1953 | break; |
1954 | |
1955 | /* |
* If the response isn't a forwarded interrupt message, issue an
* error and go on to the next response message. This should
1958 | * never happen ... |
1959 | */ |
1960 | dma_rmb(); |
1961 | if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) { |
1962 | dev_err(adapter->pdev_dev, |
1963 | "Unexpected INTRQ response type %d\n" , |
1964 | RSPD_TYPE_G(rc->type_gen)); |
1965 | continue; |
1966 | } |
1967 | |
1968 | /* |
1969 | * Extract the Queue ID from the interrupt message and perform |
1970 | * sanity checking to make sure it really refers to one of our |
1971 | * Ingress Queues which is active and matches the queue's ID. |
1972 | * None of these error conditions should ever happen so we may |
* want to make them fatal and/or conditional under DEBUG.
1975 | */ |
1976 | qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid)); |
1977 | iq_idx = IQ_IDX(s, qid); |
1978 | if (unlikely(iq_idx >= MAX_INGQ)) { |
1979 | dev_err(adapter->pdev_dev, |
1980 | "Ingress QID %d out of range\n" , qid); |
1981 | continue; |
1982 | } |
1983 | rspq = s->ingr_map[iq_idx]; |
1984 | if (unlikely(rspq == NULL)) { |
1985 | dev_err(adapter->pdev_dev, |
1986 | "Ingress QID %d RSPQ=NULL\n" , qid); |
1987 | continue; |
1988 | } |
1989 | if (unlikely(rspq->abs_id != qid)) { |
1990 | dev_err(adapter->pdev_dev, |
1991 | "Ingress QID %d refers to RSPQ %d\n" , |
1992 | qid, rspq->abs_id); |
1993 | continue; |
1994 | } |
1995 | |
1996 | /* |
1997 | * Schedule NAPI processing on the indicated Response Queue |
1998 | * and move on to the next entry in the Forwarded Interrupt |
1999 | * Queue. |
2000 | */ |
napi_schedule(&rspq->napi);
rspq_next(intrq);
2003 | } |
2004 | |
2005 | val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params); |
2006 | /* If we don't have access to the new User GTS (T5+), use the old |
2007 | * doorbell mechanism; otherwise use the new BAR2 mechanism. |
2008 | */ |
2009 | if (unlikely(!intrq->bar2_addr)) { |
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V(intrq->cntxt_id));
} else {
writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS);
2015 | wmb(); |
2016 | } |
2017 | |
spin_unlock(&adapter->sge.intrq_lock);
2019 | |
2020 | return work_done; |
2021 | } |
2022 | |
2023 | /* |
2024 | * The MSI interrupt handler handles data events from SGE response queues as |
2025 | * well as error and other async events as they all use the same MSI vector. |
2026 | */ |
2027 | static irqreturn_t t4vf_intr_msi(int irq, void *cookie) |
2028 | { |
2029 | struct adapter *adapter = cookie; |
2030 | |
2031 | process_intrq(adapter); |
2032 | return IRQ_HANDLED; |
2033 | } |
2034 | |
2035 | /** |
2036 | * t4vf_intr_handler - select the top-level interrupt handler |
2037 | * @adapter: the adapter |
2038 | * |
2039 | * Selects the top-level interrupt handler based on the type of interrupts |
2040 | * (MSI-X or MSI). |
2041 | */ |
2042 | irq_handler_t t4vf_intr_handler(struct adapter *adapter) |
2043 | { |
2044 | BUG_ON((adapter->flags & |
2045 | (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0); |
2046 | if (adapter->flags & CXGB4VF_USING_MSIX) |
2047 | return t4vf_sge_intr_msix; |
2048 | else |
2049 | return t4vf_intr_msi; |
2050 | } |
2051 | |
2052 | /** |
2053 | * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues |
2054 | * @t: Rx timer |
2055 | * |
2056 | * Runs periodically from a timer to perform maintenance of SGE RX queues. |
2057 | * |
* Replenishes RX queues that have run out due to memory shortage.
2059 | * Normally new RX buffers are added when existing ones are consumed but |
2060 | * when out of memory a queue can become empty. We schedule NAPI to do |
2061 | * the actual refill. |
2062 | */ |
2063 | static void sge_rx_timer_cb(struct timer_list *t) |
2064 | { |
2065 | struct adapter *adapter = from_timer(adapter, t, sge.rx_timer); |
2066 | struct sge *s = &adapter->sge; |
2067 | unsigned int i; |
2068 | |
2069 | /* |
2070 | * Scan the "Starving Free Lists" flag array looking for any Free |
2071 | * Lists in need of more free buffers. If we find one and it's not |
2072 | * being actively polled, then bump its "starving" counter and attempt |
2073 | * to refill it. If we're successful in adding enough buffers to push |
2074 | * the Free List over the starving threshold, then we can clear its |
2075 | * "starving" status. |
2076 | */ |
2077 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) { |
2078 | unsigned long m; |
2079 | |
2080 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
2081 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; |
2082 | struct sge_fl *fl = s->egr_map[id]; |
2083 | |
clear_bit(id, s->starving_fl);
2085 | smp_mb__after_atomic(); |
2086 | |
2087 | /* |
2088 | * Since we are accessing fl without a lock there's a |
2089 | * small probability of a false positive where we |
2090 | * schedule napi but the FL is no longer starving. |
2091 | * No biggie. |
2092 | */ |
2093 | if (fl_starving(adapter, fl)) { |
2094 | struct sge_eth_rxq *rxq; |
2095 | |
2096 | rxq = container_of(fl, struct sge_eth_rxq, fl); |
if (napi_schedule(&rxq->rspq.napi))
fl->starving++;
else
set_bit(id, s->starving_fl);
2101 | } |
2102 | } |
2103 | } |
2104 | |
2105 | /* |
2106 | * Reschedule the next scan for starving Free Lists ... |
2107 | */ |
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2109 | } |
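
/*
 * The inner loop above walks the set bits of each starving_fl word
 * with the classic m &= m - 1 trick: for m = 0b10110, successive
 * iterations see __ffs(m) = 1, 2 and 4 as m becomes 0b10100, 0b10000
 * and finally 0. Each set bit is the (word-relative) ID of a Free List
 * that the RX path flagged as starving.
 */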
2110 | |
2111 | /** |
2112 | * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues |
2113 | * @t: Tx timer |
2114 | * |
2115 | * Runs periodically from a timer to perform maintenance of SGE TX queues. |
2116 | * |
* Reclaims completed Tx packets for the Ethernet queues. Normally
2118 | * packets are cleaned up by new Tx packets, this timer cleans up packets |
2119 | * when no new packets are being submitted. This is essential for pktgen, |
2120 | * at least. |
2121 | */ |
2122 | static void sge_tx_timer_cb(struct timer_list *t) |
2123 | { |
2124 | struct adapter *adapter = from_timer(adapter, t, sge.tx_timer); |
2125 | struct sge *s = &adapter->sge; |
2126 | unsigned int i, budget; |
2127 | |
2128 | budget = MAX_TIMER_TX_RECLAIM; |
2129 | i = s->ethtxq_rover; |
2130 | do { |
2131 | struct sge_eth_txq *txq = &s->ethtxq[i]; |
2132 | |
if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
int avail = reclaimable(&txq->q);

if (avail > budget)
avail = budget;

free_tx_desc(adapter, &txq->q, avail, true);
txq->q.in_use -= avail;
__netif_tx_unlock(txq->txq);
2142 | |
2143 | budget -= avail; |
2144 | if (!budget) |
2145 | break; |
2146 | } |
2147 | |
2148 | i++; |
2149 | if (i >= s->ethqsets) |
2150 | i = 0; |
2151 | } while (i != s->ethtxq_rover); |
2152 | s->ethtxq_rover = i; |
2153 | |
2154 | /* |
2155 | * If we found too many reclaimable packets schedule a timer in the |
2156 | * near future to continue where we left off. Otherwise the next timer |
2157 | * will be at its normal interval. |
2158 | */ |
mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2160 | } |
2161 | |
2162 | /** |
2163 | * bar2_address - return the BAR2 address for an SGE Queue's Registers |
2164 | * @adapter: the adapter |
2165 | * @qid: the SGE Queue ID |
2166 | * @qtype: the SGE Queue Type (Egress or Ingress) |
2167 | * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues |
2168 | * |
2169 | * Returns the BAR2 address for the SGE Queue Registers associated with |
2170 | * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also |
2171 | * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE |
2172 | * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" |
2173 | * Registers are supported (e.g. the Write Combining Doorbell Buffer). |
2174 | */ |
2175 | static void __iomem *bar2_address(struct adapter *adapter, |
2176 | unsigned int qid, |
2177 | enum t4_bar2_qtype qtype, |
2178 | unsigned int *pbar2_qid) |
2179 | { |
2180 | u64 bar2_qoffset; |
2181 | int ret; |
2182 | |
ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
&bar2_qoffset, pbar2_qid);
2185 | if (ret) |
2186 | return NULL; |
2187 | |
2188 | return adapter->bar2 + bar2_qoffset; |
2189 | } |
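
/*
 * Typical use of bar2_address() (see the queue setup code below): the
 * returned pointer and BAR2 Queue ID are stashed in a queue's bar2_addr
 * and bar2_qid fields, after which ringing a doorbell is a single
 * writel() of the doorbell value to bar2_addr plus the appropriate
 * register offset (e.g. SGE_UDB_GTS for ingress queues, as in
 * napi_rx_handler() above).
 */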
2190 | |
2191 | /** |
2192 | * t4vf_sge_alloc_rxq - allocate an SGE RX Queue |
2193 | * @adapter: the adapter |
* @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
* @intr_dest: MSI-X vector index (overridden in MSI mode)
2198 | * @fl: pointer to the new rxq's Free List to be filled in |
2199 | * @hnd: the interrupt handler to invoke for the rspq |
2200 | */ |
2201 | int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, |
2202 | bool iqasynch, struct net_device *dev, |
2203 | int intr_dest, |
2204 | struct sge_fl *fl, rspq_handler_t hnd) |
2205 | { |
2206 | struct sge *s = &adapter->sge; |
2207 | struct port_info *pi = netdev_priv(dev); |
2208 | struct fw_iq_cmd cmd, rpl; |
2209 | int ret, iqandst, flsz = 0; |
2210 | int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING); |
2211 | |
2212 | /* |
2213 | * If we're using MSI interrupts and we're not initializing the |
2214 | * Forwarded Interrupt Queue itself, then set up this queue for |
2215 | * indirect interrupts to the Forwarded Interrupt Queue. Obviously |
2216 | * the Forwarded Interrupt Queue must be set up before any other |
2217 | * ingress queue ... |
2218 | */ |
2219 | if ((adapter->flags & CXGB4VF_USING_MSI) && |
2220 | rspq != &adapter->sge.intrq) { |
2221 | iqandst = SGE_INTRDST_IQ; |
2222 | intr_dest = adapter->sge.intrq.abs_id; |
2223 | } else |
2224 | iqandst = SGE_INTRDST_PCI; |
2225 | |
2226 | /* |
2227 | * Allocate the hardware ring for the Response Queue. The size needs |
2228 | * to be a multiple of 16 which includes the mandatory status entry |
2229 | * (regardless of whether the Status Page capabilities are enabled or |
2230 | * not). |
2231 | */ |
2232 | rspq->size = roundup(rspq->size, 16); |
rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
0, &rspq->phys_addr, NULL, 0);
2235 | if (!rspq->desc) |
2236 | return -ENOMEM; |
2237 | |
2238 | /* |
2239 | * Fill in the Ingress Queue Command. Note: Ideally this code would |
2240 | * be in t4vf_hw.c but there are so many parameters and dependencies |
2241 | * on our Linux SGE state that we would end up having to pass tons of |
2242 | * parameters. We'll have to think about how this might be migrated |
2243 | * into OS-independent common code ... |
2244 | */ |
2245 | memset(&cmd, 0, sizeof(cmd)); |
2246 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | |
2247 | FW_CMD_REQUEST_F | |
2248 | FW_CMD_WRITE_F | |
2249 | FW_CMD_EXEC_F); |
2250 | cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F | |
2251 | FW_IQ_CMD_IQSTART_F | |
2252 | FW_LEN16(cmd)); |
2253 | cmd.type_to_iqandstindex = |
2254 | cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | |
2255 | FW_IQ_CMD_IQASYNCH_V(iqasynch) | |
2256 | FW_IQ_CMD_VIID_V(pi->viid) | |
2257 | FW_IQ_CMD_IQANDST_V(iqandst) | |
2258 | FW_IQ_CMD_IQANUS_V(1) | |
2259 | FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) | |
2260 | FW_IQ_CMD_IQANDSTINDEX_V(intr_dest)); |
2261 | cmd.iqdroprss_to_iqesize = |
2262 | cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) | |
2263 | FW_IQ_CMD_IQGTSMODE_F | |
2264 | FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | |
2265 | FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); |
2266 | cmd.iqsize = cpu_to_be16(rspq->size); |
2267 | cmd.iqaddr = cpu_to_be64(rspq->phys_addr); |
2268 | |
2269 | if (fl) { |
2270 | unsigned int chip_ver = |
2271 | CHELSIO_CHIP_VERSION(adapter->params.chip); |
2272 | /* |
2273 | * Allocate the ring for the hardware free list (with space |
2274 | * for its status page) along with the associated software |
2275 | * descriptor ring. The free list size needs to be a multiple |
2276 | * of the Egress Queue Unit and at least 2 Egress Units larger |
* than the SGE's Egress Congestion Threshold
2278 | * (fl_starve_thres - 1). |
2279 | */ |
2280 | if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT) |
2281 | fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT; |
2282 | fl->size = roundup(fl->size, FL_PER_EQ_UNIT); |
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
&fl->addr, &fl->sdesc, s->stat_len);
2286 | if (!fl->desc) { |
2287 | ret = -ENOMEM; |
2288 | goto err; |
2289 | } |
2290 | |
2291 | /* |
2292 | * Calculate the size of the hardware free list ring plus |
2293 | * Status Page (which the SGE will place after the end of the |
2294 | * free list ring) in Egress Queue Units. |
2295 | */ |
2296 | flsz = (fl->size / FL_PER_EQ_UNIT + |
2297 | s->stat_len / EQ_UNIT); |
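
/*
 * Worked example (illustrative, assuming EQ_UNIT = 64 bytes and a
 * 64-byte Status Page): a Free List of 72 entries occupies
 * 72 / FL_PER_EQ_UNIT = 9 Egress Queue Units, plus one more unit
 * for the Status Page, giving flsz = 10.
 */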
2298 | |
2299 | /* |
2300 | * Fill in all the relevant firmware Ingress Queue Command |
2301 | * fields for the free list. |
2302 | */ |
2303 | cmd.iqns_to_fl0congen = |
2304 | cpu_to_be32( |
2305 | FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | |
2306 | FW_IQ_CMD_FL0PACKEN_F | |
2307 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | |
2308 | FW_IQ_CMD_FL0DATARO_V(relaxed) | |
2309 | FW_IQ_CMD_FL0PADEN_F); |
2310 | |
2311 | /* In T6, for egress queue type FL there is internal overhead |
2312 | * of 16B for header going into FLM module. Hence the maximum |
2313 | * allowed burst size is 448 bytes. For T4/T5, the hardware |
2314 | * doesn't coalesce fetch requests if more than 64 bytes of |
2315 | * Free List pointers are provided, so we use a 128-byte Fetch |
2316 | * Burst Minimum there (T6 implements coalescing so we can use |
2317 | * the smaller 64-byte value there). |
2318 | */ |
2319 | cmd.fl0dcaen_to_fl0cidxfthresh = |
2320 | cpu_to_be16( |
2321 | FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 |
2322 | ? FETCHBURSTMIN_128B_X |
2323 | : FETCHBURSTMIN_64B_T6_X) | |
2324 | FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? |
2325 | FETCHBURSTMAX_512B_X : |
2326 | FETCHBURSTMAX_256B_X)); |
2327 | cmd.fl0size = cpu_to_be16(flsz); |
2328 | cmd.fl0addr = cpu_to_be64(fl->addr); |
2329 | } |
2330 | |
2331 | /* |
2332 | * Issue the firmware Ingress Queue Command and extract the results if |
2333 | * it completes successfully. |
2334 | */ |
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2336 | if (ret) |
2337 | goto err; |
2338 | |
netif_napi_add(dev, &rspq->napi, napi_rx_handler);
2340 | rspq->cur_desc = rspq->desc; |
2341 | rspq->cidx = 0; |
2342 | rspq->gen = 1; |
2343 | rspq->next_intr_params = rspq->intr_params; |
2344 | rspq->cntxt_id = be16_to_cpu(rpl.iqid); |
rspq->bar2_addr = bar2_address(adapter,
rspq->cntxt_id,
T4_BAR2_QTYPE_INGRESS,
&rspq->bar2_qid);
2349 | rspq->abs_id = be16_to_cpu(rpl.physiqid); |
2350 | rspq->size--; /* subtract status entry */ |
2351 | rspq->adapter = adapter; |
2352 | rspq->netdev = dev; |
2353 | rspq->handler = hnd; |
2354 | |
2355 | /* set offset to -1 to distinguish ingress queues without FL */ |
2356 | rspq->offset = fl ? 0 : -1; |
2357 | |
2358 | if (fl) { |
2359 | fl->cntxt_id = be16_to_cpu(rpl.fl0id); |
2360 | fl->avail = 0; |
2361 | fl->pend_cred = 0; |
2362 | fl->pidx = 0; |
2363 | fl->cidx = 0; |
2364 | fl->alloc_failed = 0; |
2365 | fl->large_alloc_failed = 0; |
2366 | fl->starving = 0; |
2367 | |
2368 | /* Note, we must initialize the BAR2 Free List User Doorbell |
2369 | * information before refilling the Free List! |
2370 | */ |
fl->bar2_addr = bar2_address(adapter,
fl->cntxt_id,
T4_BAR2_QTYPE_EGRESS,
&fl->bar2_qid);
2375 | |
refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2377 | } |
2378 | |
2379 | return 0; |
2380 | |
2381 | err: |
2382 | /* |
2383 | * An error occurred. Clean up our partial allocation state and |
2384 | * return the error. |
2385 | */ |
2386 | if (rspq->desc) { |
dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
rspq->desc, rspq->phys_addr);
2389 | rspq->desc = NULL; |
2390 | } |
2391 | if (fl && fl->desc) { |
kfree(fl->sdesc);
fl->sdesc = NULL;
dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
fl->desc, fl->addr);
2396 | fl->desc = NULL; |
2397 | } |
2398 | return ret; |
2399 | } |
2400 | |
2401 | /** |
2402 | * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue |
2403 | * @adapter: the adapter |
2404 | * @txq: pointer to the new txq to be filled in |
2405 | * @dev: the network device |
2406 | * @devq: the network TX queue associated with the new txq |
2407 | * @iqid: the relative ingress queue ID to which events relating to |
2408 | * the new txq should be directed |
2409 | */ |
2410 | int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, |
2411 | struct net_device *dev, struct netdev_queue *devq, |
2412 | unsigned int iqid) |
2413 | { |
2414 | unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); |
2415 | struct port_info *pi = netdev_priv(dev); |
2416 | struct fw_eq_eth_cmd cmd, rpl; |
2417 | struct sge *s = &adapter->sge; |
2418 | int ret, nentries; |
2419 | |
2420 | /* |
2421 | * Calculate the size of the hardware TX Queue (including the Status |
2422 | * Page on the end of the TX Queue) in units of TX Descriptors. |
2423 | */ |
2424 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
2425 | |
2426 | /* |
2427 | * Allocate the hardware ring for the TX ring (with space for its |
2428 | * status page) along with the associated software descriptor ring. |
2429 | */ |
txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
sizeof(struct tx_desc),
sizeof(struct tx_sw_desc),
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2434 | if (!txq->q.desc) |
2435 | return -ENOMEM; |
2436 | |
2437 | /* |
2438 | * Fill in the Egress Queue Command. Note: As with the direct use of |
* the firmware Ingress Queue Command above in our RXQ allocation
2440 | * routine, ideally, this code would be in t4vf_hw.c. Again, we'll |
2441 | * have to see if there's some reasonable way to parameterize it |
2442 | * into the common code ... |
2443 | */ |
2444 | memset(&cmd, 0, sizeof(cmd)); |
2445 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | |
2446 | FW_CMD_REQUEST_F | |
2447 | FW_CMD_WRITE_F | |
2448 | FW_CMD_EXEC_F); |
2449 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F | |
2450 | FW_EQ_ETH_CMD_EQSTART_F | |
2451 | FW_LEN16(cmd)); |
2452 | cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | |
2453 | FW_EQ_ETH_CMD_VIID_V(pi->viid)); |
2454 | cmd.fetchszm_to_iqid = |
2455 | cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) | |
2456 | FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | |
2457 | FW_EQ_ETH_CMD_IQID_V(iqid)); |
2458 | cmd.dcaen_to_eqsize = |
2459 | cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 |
2460 | ? FETCHBURSTMIN_64B_X |
2461 | : FETCHBURSTMIN_64B_T6_X) | |
2462 | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | |
2463 | FW_EQ_ETH_CMD_CIDXFTHRESH_V( |
2464 | CIDXFLUSHTHRESH_32_X) | |
2465 | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); |
2466 | cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); |
2467 | |
2468 | /* |
2469 | * Issue the firmware Egress Queue Command and extract the results if |
2470 | * it completes successfully. |
2471 | */ |
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2473 | if (ret) { |
2474 | /* |
* The firmware Egress Queue Command failed for some reason.
2476 | * Free up our partial allocation state and return the error. |
2477 | */ |
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
dma_free_coherent(adapter->pdev_dev,
nentries * sizeof(struct tx_desc),
txq->q.desc, txq->q.phys_addr);
2483 | txq->q.desc = NULL; |
2484 | return ret; |
2485 | } |
2486 | |
2487 | txq->q.in_use = 0; |
2488 | txq->q.cidx = 0; |
2489 | txq->q.pidx = 0; |
2490 | txq->q.stat = (void *)&txq->q.desc[txq->q.size]; |
2491 | txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); |
txq->q.bar2_addr = bar2_address(adapter,
txq->q.cntxt_id,
T4_BAR2_QTYPE_EGRESS,
&txq->q.bar2_qid);
2496 | txq->q.abs_id = |
2497 | FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd)); |
2498 | txq->txq = devq; |
2499 | txq->tso = 0; |
2500 | txq->tx_cso = 0; |
2501 | txq->vlan_ins = 0; |
2502 | txq->q.stops = 0; |
2503 | txq->q.restarts = 0; |
2504 | txq->mapping_err = 0; |
2505 | return 0; |
2506 | } |
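
/*
 * Sizing note for the TX Queue above (illustrative, assuming the usual
 * 64-byte struct tx_desc and a 64-byte Status Page): a requested
 * txq->q.size of 1024 descriptors becomes nentries = 1024 + 1 = 1025
 * hardware entries, the extra entry holding the Status Page which the
 * SGE writes after the end of the ring.
 */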
2507 | |
2508 | /* |
2509 | * Free the DMA map resources associated with a TX queue. |
2510 | */ |
2511 | static void free_txq(struct adapter *adapter, struct sge_txq *tq) |
2512 | { |
2513 | struct sge *s = &adapter->sge; |
2514 | |
dma_free_coherent(adapter->pdev_dev,
tq->size * sizeof(*tq->desc) + s->stat_len,
tq->desc, tq->phys_addr);
2518 | tq->cntxt_id = 0; |
2519 | tq->sdesc = NULL; |
2520 | tq->desc = NULL; |
2521 | } |
2522 | |
2523 | /* |
2524 | * Free the resources associated with a response queue (possibly including a |
2525 | * free list). |
2526 | */ |
2527 | static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, |
2528 | struct sge_fl *fl) |
2529 | { |
2530 | struct sge *s = &adapter->sge; |
2531 | unsigned int flid = fl ? fl->cntxt_id : 0xffff; |
2532 | |
2533 | t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, |
2534 | rspq->cntxt_id, flid, 0xffff); |
dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
rspq->desc, rspq->phys_addr);
netif_napi_del(&rspq->napi);
2538 | rspq->netdev = NULL; |
2539 | rspq->cntxt_id = 0; |
2540 | rspq->abs_id = 0; |
2541 | rspq->desc = NULL; |
2542 | |
2543 | if (fl) { |
free_rx_bufs(adapter, fl, fl->avail);
dma_free_coherent(adapter->pdev_dev,
fl->size * sizeof(*fl->desc) + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
2549 | fl->sdesc = NULL; |
2550 | fl->cntxt_id = 0; |
2551 | fl->desc = NULL; |
2552 | } |
2553 | } |
2554 | |
2555 | /** |
2556 | * t4vf_free_sge_resources - free SGE resources |
2557 | * @adapter: the adapter |
2558 | * |
2559 | * Frees resources used by the SGE queue sets. |
2560 | */ |
2561 | void t4vf_free_sge_resources(struct adapter *adapter) |
2562 | { |
2563 | struct sge *s = &adapter->sge; |
2564 | struct sge_eth_rxq *rxq = s->ethrxq; |
2565 | struct sge_eth_txq *txq = s->ethtxq; |
2566 | struct sge_rspq *evtq = &s->fw_evtq; |
2567 | struct sge_rspq *intrq = &s->intrq; |
2568 | int qs; |
2569 | |
2570 | for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { |
2571 | if (rxq->rspq.desc) |
free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
if (txq->q.desc) {
t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
kfree(txq->q.sdesc);
free_txq(adapter, &txq->q);
2578 | } |
2579 | } |
2580 | if (evtq->desc) |
free_rspq_fl(adapter, evtq, NULL);
if (intrq->desc)
free_rspq_fl(adapter, intrq, NULL);
2584 | } |
2585 | |
2586 | /** |
2587 | * t4vf_sge_start - enable SGE operation |
2588 | * @adapter: the adapter |
2589 | * |
2590 | * Start tasklets and timers associated with the DMA engine. |
2591 | */ |
2592 | void t4vf_sge_start(struct adapter *adapter) |
2593 | { |
2594 | adapter->sge.ethtxq_rover = 0; |
mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2597 | } |
2598 | |
2599 | /** |
2600 | * t4vf_sge_stop - disable SGE operation |
2601 | * @adapter: the adapter |
2602 | * |
2603 | * Stop tasklets and timers associated with the DMA engine. Note that |
2604 | * this is effective only if measures have been taken to disable any HW |
2605 | * events that may restart them. |
2606 | */ |
2607 | void t4vf_sge_stop(struct adapter *adapter) |
2608 | { |
2609 | struct sge *s = &adapter->sge; |
2610 | |
2611 | if (s->rx_timer.function) |
del_timer_sync(&s->rx_timer);
if (s->tx_timer.function)
del_timer_sync(&s->tx_timer);
2615 | } |
2616 | |
2617 | /** |
2618 | * t4vf_sge_init - initialize SGE |
2619 | * @adapter: the adapter |
2620 | * |
2621 | * Performs SGE initialization needed every time after a chip reset. |
2622 | * We do not initialize any of the queue sets here, instead the driver |
2623 | * top-level must request those individually. We also do not enable DMA |
2624 | * here, that should be done after the queues have been set up. |
2625 | */ |
2626 | int t4vf_sge_init(struct adapter *adapter) |
2627 | { |
2628 | struct sge_params *sge_params = &adapter->params.sge; |
2629 | u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; |
2630 | u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; |
2631 | struct sge *s = &adapter->sge; |
2632 | |
2633 | /* |
2634 | * Start by vetting the basic SGE parameters which have been set up by |
2635 | * the Physical Function Driver. Ideally we should be able to deal |
2636 | * with _any_ configuration. Practice is different ... |
2637 | */ |
2638 | |
2639 | /* We only bother using the Large Page logic if the Large Page Buffer |
2640 | * is larger than our Page Size Buffer. |
2641 | */ |
2642 | if (fl_large_pg <= fl_small_pg) |
2643 | fl_large_pg = 0; |
2644 | |
2645 | /* The Page Size Buffer must be exactly equal to our Page Size and the |
2646 | * Large Page Size Buffer should be 0 (per above) or a power of 2. |
2647 | */ |
2648 | if (fl_small_pg != PAGE_SIZE || |
2649 | (fl_large_pg & (fl_large_pg - 1)) != 0) { |
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2651 | fl_small_pg, fl_large_pg); |
2652 | return -EINVAL; |
2653 | } |
2654 | if ((sge_params->sge_control & RXPKTCPLMODE_F) != |
2655 | RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { |
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2657 | return -EINVAL; |
2658 | } |
2659 | |
2660 | /* |
2661 | * Now translate the adapter parameters into our internal forms. |
2662 | */ |
2663 | if (fl_large_pg) |
2664 | s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; |
2665 | s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) |
2666 | ? 128 : 64); |
2667 | s->pktshift = PKTSHIFT_G(sge_params->sge_control); |
2668 | s->fl_align = t4vf_fl_pkt_align(adapter); |
2669 | |
2670 | /* A FL with <= fl_starve_thres buffers is starving and a periodic |
2671 | * timer will attempt to refill it. This needs to be larger than the |
2672 | * SGE's Egress Congestion Threshold. If it isn't, then we can get |
2673 | * stuck waiting for new packets while the SGE is waiting for us to |
2674 | * give it more Free List entries. (Note that the SGE's Egress |
2675 | * Congestion Threshold is in units of 2 Free List pointers.) |
2676 | */ |
2677 | switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { |
2678 | case CHELSIO_T4: |
2679 | s->fl_starve_thres = |
2680 | EGRTHRESHOLD_G(sge_params->sge_congestion_control); |
2681 | break; |
2682 | case CHELSIO_T5: |
2683 | s->fl_starve_thres = |
2684 | EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); |
2685 | break; |
2686 | case CHELSIO_T6: |
2687 | default: |
2688 | s->fl_starve_thres = |
2689 | T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); |
2690 | break; |
2691 | } |
2692 | s->fl_starve_thres = s->fl_starve_thres * 2 + 1; |
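
/*
 * Example (illustrative): if the chip reports an Egress Congestion
 * Threshold of 16, that is 16 * 2 = 32 Free List pointers, so we treat
 * a Free List holding 33 or fewer buffers as starving, keeping us
 * strictly above the point where the SGE would stall.
 */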
2693 | |
2694 | /* |
2695 | * Set up tasklet timers. |
2696 | */ |
2697 | timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); |
2698 | timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); |
2699 | |
2700 | /* |
2701 | * Initialize Forwarded Interrupt Queue lock. |
2702 | */ |
2703 | spin_lock_init(&s->intrq_lock); |
2704 | |
2705 | return 0; |
2706 | } |
2707 | |