1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /***************************************************************************** |
3 | * * |
4 | * File: sge.c * |
5 | * $Revision: 1.26 $ * |
6 | * $Date: 2005/06/21 18:29:48 $ * |
7 | * Description: * |
8 | * DMA engine. * |
9 | * part of the Chelsio 10Gb Ethernet Driver. * |
10 | * * |
11 | * * |
12 | * http://www.chelsio.com * |
13 | * * |
14 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * |
15 | * All rights reserved. * |
16 | * * |
17 | * Maintainers: maintainers@chelsio.com * |
18 | * * |
19 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * |
20 | * Tina Yang <tainay@chelsio.com> * |
21 | * Felix Marti <felix@chelsio.com> * |
22 | * Scott Bardone <sbardone@chelsio.com> * |
23 | * Kurt Ottaway <kottaway@chelsio.com> * |
24 | * Frank DiMambro <frank@chelsio.com> * |
25 | * * |
26 | * History: * |
27 | * * |
28 | ****************************************************************************/ |
29 | |
30 | #include "common.h" |
31 | |
32 | #include <linux/types.h> |
33 | #include <linux/errno.h> |
34 | #include <linux/pci.h> |
35 | #include <linux/ktime.h> |
36 | #include <linux/netdevice.h> |
37 | #include <linux/etherdevice.h> |
38 | #include <linux/if_vlan.h> |
39 | #include <linux/skbuff.h> |
40 | #include <linux/mm.h> |
41 | #include <linux/tcp.h> |
42 | #include <linux/ip.h> |
43 | #include <linux/in.h> |
44 | #include <linux/if_arp.h> |
45 | #include <linux/slab.h> |
46 | #include <linux/prefetch.h> |
47 | |
48 | #include "cpl5_cmd.h" |
49 | #include "sge.h" |
50 | #include "regs.h" |
51 | #include "espi.h" |
52 | |
53 | /* This belongs in if_ether.h */ |
54 | #define ETH_P_CPL5 0xf |
55 | |
56 | #define SGE_CMDQ_N 2 |
57 | #define SGE_FREELQ_N 2 |
58 | #define SGE_CMDQ0_E_N 1024 |
59 | #define SGE_CMDQ1_E_N 128 |
60 | #define SGE_FREEL_SIZE 4096 |
61 | #define SGE_JUMBO_FREEL_SIZE 512 |
62 | #define SGE_FREEL_REFILL_THRESH 16 |
63 | #define SGE_RESPQ_E_N 1024 |
64 | #define SGE_INTRTIMER_NRES 1000 |
65 | #define SGE_RX_SM_BUF_SIZE 1536 |
66 | #define SGE_TX_DESC_MAX_PLEN 16384 |
67 | |
68 | #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) |
69 | |
70 | /* |
71 | * Period of the TX buffer reclaim timer. This timer does not need to run |
72 | * frequently as TX buffers are usually reclaimed by new TX packets. |
73 | */ |
74 | #define TX_RECLAIM_PERIOD (HZ / 4) |
75 | |
76 | #define M_CMD_LEN 0x7fffffff |
77 | #define V_CMD_LEN(v) (v) |
78 | #define G_CMD_LEN(v) ((v) & M_CMD_LEN) |
79 | #define V_CMD_GEN1(v) ((v) << 31) |
80 | #define V_CMD_GEN2(v) (v) |
81 | #define F_CMD_DATAVALID (1 << 1) |
82 | #define F_CMD_SOP (1 << 2) |
83 | #define V_CMD_EOP(v) ((v) << 3) |
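/*
 * Usage sketch: a descriptor's len_gen word is built as
 * V_CMD_LEN(len) | V_CMD_GEN1(gen), and the generation is repeated in the
 * gen2/flags word via V_CMD_GEN2(gen). That second word is written last
 * (behind a wmb() for the word the hardware polls), so a descriptor only
 * appears valid once it is fully populated; see refill_free_list() and
 * write_tx_descs().
 */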
84 | |
85 | /* |
86 | * Command queue, receive buffer list, and response queue descriptors. |
87 | */ |
88 | #if defined(__BIG_ENDIAN_BITFIELD) |
89 | struct cmdQ_e { |
90 | u32 addr_lo; |
91 | u32 len_gen; |
92 | u32 flags; |
93 | u32 addr_hi; |
94 | }; |
95 | |
96 | struct freelQ_e { |
97 | u32 addr_lo; |
98 | u32 len_gen; |
99 | u32 gen2; |
100 | u32 addr_hi; |
101 | }; |
102 | |
103 | struct respQ_e { |
104 | u32 Qsleeping : 4; |
105 | u32 Cmdq1CreditReturn : 5; |
106 | u32 Cmdq1DmaComplete : 5; |
107 | u32 Cmdq0CreditReturn : 5; |
108 | u32 Cmdq0DmaComplete : 5; |
109 | u32 FreelistQid : 2; |
110 | u32 CreditValid : 1; |
111 | u32 DataValid : 1; |
112 | u32 Offload : 1; |
113 | u32 Eop : 1; |
114 | u32 Sop : 1; |
115 | u32 GenerationBit : 1; |
116 | u32 BufferLength; |
117 | }; |
118 | #elif defined(__LITTLE_ENDIAN_BITFIELD) |
119 | struct cmdQ_e { |
120 | u32 len_gen; |
121 | u32 addr_lo; |
122 | u32 addr_hi; |
123 | u32 flags; |
124 | }; |
125 | |
126 | struct freelQ_e { |
127 | u32 len_gen; |
128 | u32 addr_lo; |
129 | u32 addr_hi; |
130 | u32 gen2; |
131 | }; |
132 | |
133 | struct respQ_e { |
134 | u32 BufferLength; |
135 | u32 GenerationBit : 1; |
136 | u32 Sop : 1; |
137 | u32 Eop : 1; |
138 | u32 Offload : 1; |
139 | u32 DataValid : 1; |
140 | u32 CreditValid : 1; |
141 | u32 FreelistQid : 2; |
142 | u32 Cmdq0DmaComplete : 5; |
143 | u32 Cmdq0CreditReturn : 5; |
144 | u32 Cmdq1DmaComplete : 5; |
145 | u32 Cmdq1CreditReturn : 5; |
146 | u32 Qsleeping : 4; |
147 | } ; |
148 | #endif |
149 | |
150 | /* |
151 | * SW Context Command and Freelist Queue Descriptors |
152 | */ |
153 | struct cmdQ_ce { |
154 | struct sk_buff *skb; |
155 | DEFINE_DMA_UNMAP_ADDR(dma_addr); |
156 | DEFINE_DMA_UNMAP_LEN(dma_len); |
157 | }; |
158 | |
159 | struct freelQ_ce { |
160 | struct sk_buff *skb; |
161 | DEFINE_DMA_UNMAP_ADDR(dma_addr); |
162 | DEFINE_DMA_UNMAP_LEN(dma_len); |
163 | }; |
164 | |
165 | /* |
166 | * SW command, freelist and response rings |
167 | */ |
168 | struct cmdQ { |
169 | unsigned long status; /* HW DMA fetch status */ |
170 | unsigned int in_use; /* # of in-use command descriptors */ |
171 | unsigned int size; /* # of descriptors */ |
172 | unsigned int processed; /* total # of descs HW has processed */ |
173 | unsigned int cleaned; /* total # of descs SW has reclaimed */ |
174 | unsigned int stop_thres; /* SW TX queue suspend threshold */ |
175 | u16 pidx; /* producer index (SW) */ |
176 | u16 cidx; /* consumer index (HW) */ |
177 | u8 genbit; /* current generation (=valid) bit */ |
178 | u8 sop; /* is next entry start of packet? */ |
179 | struct cmdQ_e *entries; /* HW command descriptor Q */ |
180 | struct cmdQ_ce *centries; /* SW command context descriptor Q */ |
181 | dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ |
182 | spinlock_t lock; /* Lock to protect cmdQ enqueuing */ |
183 | }; |
184 | |
185 | struct freelQ { |
186 | unsigned int credits; /* # of available RX buffers */ |
187 | unsigned int size; /* free list capacity */ |
188 | u16 pidx; /* producer index (SW) */ |
189 | u16 cidx; /* consumer index (HW) */ |
190 | u16 rx_buffer_size; /* Buffer size on this free list */ |
191 | u16 dma_offset; /* DMA offset to align IP headers */ |
192 | u16 recycleq_idx; /* skb recycle q to use */ |
193 | u8 genbit; /* current generation (=valid) bit */ |
194 | struct freelQ_e *entries; /* HW freelist descriptor Q */ |
195 | struct freelQ_ce *centries; /* SW freelist context descriptor Q */ |
196 | dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ |
197 | }; |
198 | |
199 | struct respQ { |
200 | unsigned int credits; /* credits to be returned to SGE */ |
201 | unsigned int size; /* # of response Q descriptors */ |
202 | u16 cidx; /* consumer index (SW) */ |
203 | u8 genbit; /* current generation(=valid) bit */ |
204 | struct respQ_e *entries; /* HW response descriptor Q */ |
205 | dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ |
206 | }; |
207 | |
208 | /* Bit flags for cmdQ.status */ |
209 | enum { |
210 | CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ |
211 | CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ |
212 | }; |
213 | |
214 | /* T204 TX SW scheduler */ |
215 | |
216 | /* Per T204 TX port */ |
217 | struct sched_port { |
218 | unsigned int avail; /* available bits - quota */ |
219 | unsigned int drain_bits_per_1024ns; /* drain rate */ |
220 | unsigned int speed; /* drain rate, mbps */ |
221 | unsigned int mtu; /* mtu size */ |
222 | struct sk_buff_head skbq; /* pending skbs */ |
223 | }; |
224 | |
225 | /* Per T204 device */ |
226 | struct sched { |
227 | ktime_t last_updated; /* last time quotas were computed */ |
228 | unsigned int max_avail; /* max bits to be sent to any port */ |
229 | unsigned int port; /* port index (round robin ports) */ |
230 | unsigned int num; /* num skbs in per port queues */ |
231 | struct sched_port p[MAX_NPORTS]; |
232 | struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ |
233 | struct sge *sge; |
234 | }; |
235 | |
236 | static void restart_sched(struct tasklet_struct *t); |
237 | |
238 | |
239 | /* |
240 | * Main SGE data structure |
241 | * |
 * Interrupts are handled by a single CPU and it is likely that on an MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
245 | * contention. |
246 | */ |
247 | struct sge { |
248 | struct adapter *adapter; /* adapter backpointer */ |
249 | struct net_device *netdev; /* netdevice backpointer */ |
250 | struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ |
251 | struct respQ respQ; /* response Q */ |
252 | unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ |
253 | unsigned int rx_pkt_pad; /* RX padding for L2 packets */ |
254 | unsigned int jumbo_fl; /* jumbo freelist Q index */ |
255 | unsigned int intrtimer_nres; /* no-resource interrupt timer */ |
256 | unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ |
257 | struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ |
258 | struct timer_list espibug_timer; |
259 | unsigned long espibug_timeout; |
260 | struct sk_buff *espibug_skb[MAX_NPORTS]; |
261 | u32 sge_control; /* shadow value of sge control reg */ |
262 | struct sge_intr_counts stats; |
263 | struct sge_port_stats __percpu *port_stats[MAX_NPORTS]; |
264 | struct sched *tx_sched; |
265 | struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; |
266 | }; |
267 | |
268 | static const u8 ch_mac_addr[ETH_ALEN] = { |
269 | 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 |
270 | }; |
271 | |
272 | /* |
273 | * stop tasklet and free all pending skb's |
274 | */ |
275 | static void tx_sched_stop(struct sge *sge) |
276 | { |
277 | struct sched *s = sge->tx_sched; |
278 | int i; |
279 | |
	tasklet_kill(&s->sched_tsk);
281 | |
282 | for (i = 0; i < MAX_NPORTS; i++) |
		__skb_queue_purge(&s->p[i].skbq);
284 | } |
285 | |
286 | /* |
287 | * t1_sched_update_parms() is called when the MTU or link speed changes. It |
 * re-computes scheduler parameters to cope with the change.
289 | */ |
290 | unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, |
291 | unsigned int mtu, unsigned int speed) |
292 | { |
293 | struct sched *s = sge->tx_sched; |
294 | struct sched_port *p = &s->p[port]; |
295 | unsigned int max_avail_segs; |
296 | |
	pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
298 | if (speed) |
299 | p->speed = speed; |
300 | if (mtu) |
301 | p->mtu = mtu; |
302 | |
303 | if (speed || mtu) { |
304 | unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); |
305 | do_div(drain, (p->mtu + 50) * 1000); |
306 | p->drain_bits_per_1024ns = (unsigned int) drain; |
307 | |
308 | if (p->speed < 1000) |
309 | p->drain_bits_per_1024ns = |
310 | 90 * p->drain_bits_per_1024ns / 100; |
311 | } |
312 | |
313 | if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { |
314 | p->drain_bits_per_1024ns -= 16; |
315 | s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); |
316 | max_avail_segs = max(1U, 4096 / (p->mtu - 40)); |
317 | } else { |
318 | s->max_avail = 16384; |
319 | max_avail_segs = max(1U, 9000 / (p->mtu - 40)); |
320 | } |
321 | |
	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
324 | p->speed, s->max_avail, max_avail_segs, |
325 | p->drain_bits_per_1024ns); |
326 | |
327 | return max_avail_segs * (p->mtu - 40); |
328 | } |
329 | |
330 | #if 0 |
331 | |
332 | /* |
333 | * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of |
334 | * data that can be pushed per port. |
335 | */ |
336 | void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val) |
337 | { |
338 | struct sched *s = sge->tx_sched; |
339 | unsigned int i; |
340 | |
341 | s->max_avail = val; |
342 | for (i = 0; i < MAX_NPORTS; i++) |
343 | t1_sched_update_parms(sge, i, 0, 0); |
344 | } |
345 | |
346 | /* |
347 | * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port |
348 | * is draining. |
349 | */ |
350 | void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, |
351 | unsigned int val) |
352 | { |
353 | struct sched *s = sge->tx_sched; |
354 | struct sched_port *p = &s->p[port]; |
355 | p->drain_bits_per_1024ns = val * 1024 / 1000; |
356 | t1_sched_update_parms(sge, port, 0, 0); |
357 | } |
358 | |
359 | #endif /* 0 */ |
360 | |
361 | /* |
362 | * tx_sched_init() allocates resources and does basic initialization. |
363 | */ |
364 | static int tx_sched_init(struct sge *sge) |
365 | { |
366 | struct sched *s; |
367 | int i; |
368 | |
	s = kzalloc(sizeof(struct sched), GFP_KERNEL);
370 | if (!s) |
371 | return -ENOMEM; |
372 | |
	pr_debug("tx_sched_init\n");
	tasklet_setup(&s->sched_tsk, restart_sched);
375 | s->sge = sge; |
376 | sge->tx_sched = s; |
377 | |
378 | for (i = 0; i < MAX_NPORTS; i++) { |
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
381 | } |
382 | |
383 | return 0; |
384 | } |
385 | |
386 | /* |
387 | * sched_update_avail() computes the delta since the last time it was called |
 * and updates the per port quota (number of bits that can be sent to any
389 | * port). |
390 | */ |
391 | static inline int sched_update_avail(struct sge *sge) |
392 | { |
393 | struct sched *s = sge->tx_sched; |
394 | ktime_t now = ktime_get(); |
395 | unsigned int i; |
396 | long long delta_time_ns; |
397 | |
398 | delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); |
399 | |
	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
401 | if (delta_time_ns < 15000) |
402 | return 0; |
403 | |
404 | for (i = 0; i < MAX_NPORTS; i++) { |
405 | struct sched_port *p = &s->p[i]; |
406 | unsigned int delta_avail; |
407 | |
408 | delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; |
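		/*
		 * Note: the >> 13 combines the per-1024 ns scaling (>> 10)
		 * with a divide by 8, so avail appears to accumulate bytes
		 * rather than raw bits; sched_skb() compares it directly
		 * against skb->len.
		 */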
409 | p->avail = min(p->avail + delta_avail, s->max_avail); |
410 | } |
411 | |
412 | s->last_updated = now; |
413 | |
414 | return 1; |
415 | } |
416 | |
417 | /* |
418 | * sched_skb() is called from two different places. In the tx path, any |
419 | * packet generating load on an output port will call sched_skb() |
420 | * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq |
421 | * context (skb == NULL). |
422 | * The scheduler only returns a skb (which will then be sent) if the |
423 | * length of the skb is <= the current quota of the output port. |
424 | */ |
425 | static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, |
426 | unsigned int credits) |
427 | { |
428 | struct sched *s = sge->tx_sched; |
429 | struct sk_buff_head *skbq; |
430 | unsigned int i, len, update = 1; |
431 | |
	pr_debug("sched_skb %p\n", skb);
433 | if (!skb) { |
434 | if (!s->num) |
435 | return NULL; |
436 | } else { |
437 | skbq = &s->p[skb->dev->if_port].skbq; |
		__skb_queue_tail(skbq, skb);
439 | s->num++; |
440 | skb = NULL; |
441 | } |
442 | |
443 | if (credits < MAX_SKB_FRAGS + 1) |
444 | goto out; |
445 | |
446 | again: |
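	/*
	 * Visit the ports round-robin starting after the one served last;
	 * the mask works because MAX_NPORTS is a power of two.
	 */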
447 | for (i = 0; i < MAX_NPORTS; i++) { |
448 | s->port = (s->port + 1) & (MAX_NPORTS - 1); |
449 | skbq = &s->p[s->port].skbq; |
450 | |
		skb = skb_peek(skbq);
452 | |
453 | if (!skb) |
454 | continue; |
455 | |
456 | len = skb->len; |
457 | if (len <= s->p[s->port].avail) { |
458 | s->p[s->port].avail -= len; |
459 | s->num--; |
			__skb_unlink(skb, skbq);
461 | goto out; |
462 | } |
463 | skb = NULL; |
464 | } |
465 | |
466 | if (update-- && sched_update_avail(sge)) |
467 | goto again; |
468 | |
469 | out: |
470 | /* If there are more pending skbs, we use the hardware to schedule us |
471 | * again. |
472 | */ |
473 | if (s->num && !skb) { |
474 | struct cmdQ *q = &sge->cmdQ[0]; |
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
479 | } |
480 | } |
	pr_debug("sched_skb ret %p\n", skb);
482 | |
483 | return skb; |
484 | } |
485 | |
486 | /* |
487 | * PIO to indicate that memory mapped Q contains valid descriptor(s). |
488 | */ |
489 | static inline void doorbell_pio(struct adapter *adapter, u32 val) |
490 | { |
491 | wmb(); |
	writel(val, adapter->regs + A_SG_DOORBELL);
493 | } |
494 | |
495 | /* |
496 | * Frees all RX buffers on the freelist Q. The caller must make sure that |
497 | * the SGE is turned off before calling this function. |
498 | */ |
499 | static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) |
500 | { |
501 | unsigned int cidx = q->cidx; |
502 | |
503 | while (q->credits--) { |
504 | struct freelQ_ce *ce = &q->centries[cidx]; |
505 | |
506 | dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr), |
507 | dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE); |
508 | dev_kfree_skb(ce->skb); |
509 | ce->skb = NULL; |
510 | if (++cidx == q->size) |
511 | cidx = 0; |
512 | } |
513 | } |
514 | |
515 | /* |
516 | * Free RX free list and response queue resources. |
517 | */ |
518 | static void free_rx_resources(struct sge *sge) |
519 | { |
520 | struct pci_dev *pdev = sge->adapter->pdev; |
521 | unsigned int size, i; |
522 | |
523 | if (sge->respQ.entries) { |
524 | size = sizeof(struct respQ_e) * sge->respQ.size; |
		dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
				  sge->respQ.dma_addr);
527 | } |
528 | |
529 | for (i = 0; i < SGE_FREELQ_N; i++) { |
530 | struct freelQ *q = &sge->freelQ[i]; |
531 | |
532 | if (q->centries) { |
533 | free_freelQ_buffers(pdev, q); |
			kfree(q->centries);
535 | } |
536 | if (q->entries) { |
537 | size = sizeof(struct freelQ_e) * q->size; |
			dma_free_coherent(&pdev->dev, size, q->entries,
					  q->dma_addr);
540 | } |
541 | } |
542 | } |
543 | |
544 | /* |
545 | * Allocates basic RX resources, consisting of memory mapped freelist Qs and a |
546 | * response queue. |
547 | */ |
548 | static int alloc_rx_resources(struct sge *sge, struct sge_params *p) |
549 | { |
550 | struct pci_dev *pdev = sge->adapter->pdev; |
551 | unsigned int size, i; |
552 | |
553 | for (i = 0; i < SGE_FREELQ_N; i++) { |
554 | struct freelQ *q = &sge->freelQ[i]; |
555 | |
556 | q->genbit = 1; |
557 | q->size = p->freelQ_size[i]; |
558 | q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; |
559 | size = sizeof(struct freelQ_e) * q->size; |
		q->entries = dma_alloc_coherent(&pdev->dev, size,
						&q->dma_addr, GFP_KERNEL);
562 | if (!q->entries) |
563 | goto err_no_mem; |
564 | |
565 | size = sizeof(struct freelQ_ce) * q->size; |
566 | q->centries = kzalloc(size, GFP_KERNEL); |
567 | if (!q->centries) |
568 | goto err_no_mem; |
569 | } |
570 | |
571 | /* |
572 | * Calculate the buffer sizes for the two free lists. FL0 accommodates |
573 | * regular sized Ethernet frames, FL1 is sized not to exceed 16K, |
574 | * including all the sk_buff overhead. |
575 | * |
576 | * Note: For T2 FL0 and FL1 are reversed. |
577 | */ |
578 | sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + |
579 | sizeof(struct cpl_rx_data) + |
580 | sge->freelQ[!sge->jumbo_fl].dma_offset; |
581 | |
582 | size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
583 | |
584 | sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; |
585 | |
586 | /* |
587 | * Setup which skb recycle Q should be used when recycling buffers from |
588 | * each free list. |
589 | */ |
590 | sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; |
591 | sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; |
592 | |
593 | sge->respQ.genbit = 1; |
594 | sge->respQ.size = SGE_RESPQ_E_N; |
595 | sge->respQ.credits = 0; |
596 | size = sizeof(struct respQ_e) * sge->respQ.size; |
597 | sge->respQ.entries = |
598 | dma_alloc_coherent(dev: &pdev->dev, size, dma_handle: &sge->respQ.dma_addr, |
599 | GFP_KERNEL); |
600 | if (!sge->respQ.entries) |
601 | goto err_no_mem; |
602 | return 0; |
603 | |
604 | err_no_mem: |
605 | free_rx_resources(sge); |
606 | return -ENOMEM; |
607 | } |
608 | |
609 | /* |
610 | * Reclaims n TX descriptors and frees the buffers associated with them. |
611 | */ |
612 | static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) |
613 | { |
614 | struct cmdQ_ce *ce; |
615 | struct pci_dev *pdev = sge->adapter->pdev; |
616 | unsigned int cidx = q->cidx; |
617 | |
618 | q->in_use -= n; |
619 | ce = &q->centries[cidx]; |
620 | while (n--) { |
621 | if (likely(dma_unmap_len(ce, dma_len))) { |
622 | dma_unmap_single(&pdev->dev, |
623 | dma_unmap_addr(ce, dma_addr), |
624 | dma_unmap_len(ce, dma_len), |
625 | DMA_TO_DEVICE); |
626 | if (q->sop) |
627 | q->sop = 0; |
628 | } |
629 | if (ce->skb) { |
			dev_kfree_skb_any(ce->skb);
631 | q->sop = 1; |
632 | } |
633 | ce++; |
634 | if (++cidx == q->size) { |
635 | cidx = 0; |
636 | ce = q->centries; |
637 | } |
638 | } |
639 | q->cidx = cidx; |
640 | } |
641 | |
642 | /* |
643 | * Free TX resources. |
644 | * |
645 | * Assumes that SGE is stopped and all interrupts are disabled. |
646 | */ |
647 | static void free_tx_resources(struct sge *sge) |
648 | { |
649 | struct pci_dev *pdev = sge->adapter->pdev; |
650 | unsigned int size, i; |
651 | |
652 | for (i = 0; i < SGE_CMDQ_N; i++) { |
653 | struct cmdQ *q = &sge->cmdQ[i]; |
654 | |
655 | if (q->centries) { |
656 | if (q->in_use) |
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
659 | } |
660 | if (q->entries) { |
661 | size = sizeof(struct cmdQ_e) * q->size; |
			dma_free_coherent(&pdev->dev, size, q->entries,
					  q->dma_addr);
664 | } |
665 | } |
666 | } |
667 | |
668 | /* |
669 | * Allocates basic TX resources, consisting of memory mapped command Qs. |
670 | */ |
671 | static int alloc_tx_resources(struct sge *sge, struct sge_params *p) |
672 | { |
673 | struct pci_dev *pdev = sge->adapter->pdev; |
674 | unsigned int size, i; |
675 | |
676 | for (i = 0; i < SGE_CMDQ_N; i++) { |
677 | struct cmdQ *q = &sge->cmdQ[i]; |
678 | |
679 | q->genbit = 1; |
680 | q->sop = 1; |
681 | q->size = p->cmdQ_size[i]; |
682 | q->in_use = 0; |
683 | q->status = 0; |
684 | q->processed = q->cleaned = 0; |
685 | q->stop_thres = 0; |
686 | spin_lock_init(&q->lock); |
687 | size = sizeof(struct cmdQ_e) * q->size; |
		q->entries = dma_alloc_coherent(&pdev->dev, size,
						&q->dma_addr, GFP_KERNEL);
690 | if (!q->entries) |
691 | goto err_no_mem; |
692 | |
693 | size = sizeof(struct cmdQ_ce) * q->size; |
694 | q->centries = kzalloc(size, GFP_KERNEL); |
695 | if (!q->centries) |
696 | goto err_no_mem; |
697 | } |
698 | |
699 | /* |
700 | * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE |
701 | * only. For queue 0 set the stop threshold so we can handle one more |
702 | * packet from each port, plus reserve an additional 24 entries for |
703 | * Ethernet packets only. Queue 1 never suspends nor do we reserve |
704 | * space for Ethernet packets. |
705 | */ |
706 | sge->cmdQ[0].stop_thres = sge->adapter->params.nports * |
707 | (MAX_SKB_FRAGS + 1); |
708 | return 0; |
709 | |
710 | err_no_mem: |
711 | free_tx_resources(sge); |
712 | return -ENOMEM; |
713 | } |
714 | |
715 | static inline void setup_ring_params(struct adapter *adapter, u64 addr, |
716 | u32 size, int base_reg_lo, |
717 | int base_reg_hi, int size_reg) |
718 | { |
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
722 | } |
723 | |
724 | /* |
725 | * Enable/disable VLAN acceleration. |
726 | */ |
727 | void t1_vlan_mode(struct adapter *adapter, netdev_features_t features) |
728 | { |
729 | struct sge *sge = adapter->sge; |
730 | |
731 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
732 | sge->sge_control |= F_VLAN_XTRACT; |
733 | else |
734 | sge->sge_control &= ~F_VLAN_XTRACT; |
735 | if (adapter->open_device_map) { |
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
738 | } |
739 | } |
740 | |
741 | /* |
742 | * Programs the various SGE registers. However, the engine is not yet enabled, |
743 | * but sge->sge_control is setup and ready to go. |
744 | */ |
745 | static void configure_sge(struct sge *sge, struct sge_params *p) |
746 | { |
747 | struct adapter *ap = sge->adapter; |
748 | |
	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
760 | |
761 | /* The threshold comparison uses <. */ |
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
763 | |
	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
767 | |
768 | sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | |
769 | F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | |
770 | V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | |
771 | V_RX_PKT_OFFSET(sge->rx_pkt_pad); |
772 | |
773 | #if defined(__BIG_ENDIAN_BITFIELD) |
774 | sge->sge_control |= F_ENABLE_BIG_ENDIAN; |
775 | #endif |
776 | |
777 | /* Initialize no-resource timer */ |
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
779 | |
780 | t1_sge_set_coalesce_params(sge, p); |
781 | } |
782 | |
783 | /* |
784 | * Return the payload capacity of the jumbo free-list buffers. |
785 | */ |
786 | static inline unsigned int jumbo_payload_capacity(const struct sge *sge) |
787 | { |
788 | return sge->freelQ[sge->jumbo_fl].rx_buffer_size - |
789 | sge->freelQ[sge->jumbo_fl].dma_offset - |
790 | sizeof(struct cpl_rx_data); |
791 | } |
792 | |
793 | /* |
794 | * Frees all SGE related resources and the sge structure itself |
795 | */ |
796 | void t1_sge_destroy(struct sge *sge) |
797 | { |
798 | int i; |
799 | |
800 | for_each_port(sge->adapter, i) |
		free_percpu(sge->port_stats[i]);
802 | |
	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
807 | } |
808 | |
809 | /* |
810 | * Allocates new RX buffers on the freelist Q (and tracks them on the freelist |
811 | * context Q) until the Q is full or alloc_skb fails. |
812 | * |
813 | * It is possible that the generation bits already match, indicating that the |
814 | * buffer is already valid and nothing needs to be done. This happens when we |
815 | * copied a received buffer into a new sk_buff during the interrupt processing. |
816 | * |
817 | * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), |
818 | * we specify a RX_OFFSET in order to make sure that the IP header is 4B |
819 | * aligned. |
820 | */ |
821 | static void refill_free_list(struct sge *sge, struct freelQ *q) |
822 | { |
823 | struct pci_dev *pdev = sge->adapter->pdev; |
824 | struct freelQ_ce *ce = &q->centries[q->pidx]; |
825 | struct freelQ_e *e = &q->entries[q->pidx]; |
826 | unsigned int dma_len = q->rx_buffer_size - q->dma_offset; |
827 | |
828 | while (q->credits < q->size) { |
829 | struct sk_buff *skb; |
830 | dma_addr_t mapping; |
831 | |
		skb = dev_alloc_skb(q->rx_buffer_size);
833 | if (!skb) |
834 | break; |
835 | |
		skb_reserve(skb, q->dma_offset);
837 | mapping = dma_map_single(&pdev->dev, skb->data, dma_len, |
838 | DMA_FROM_DEVICE); |
		skb_reserve(skb, sge->rx_pkt_pad);
840 | |
841 | ce->skb = skb; |
842 | dma_unmap_addr_set(ce, dma_addr, mapping); |
843 | dma_unmap_len_set(ce, dma_len, dma_len); |
844 | e->addr_lo = (u32)mapping; |
845 | e->addr_hi = (u64)mapping >> 32; |
846 | e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); |
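		/*
		 * Publish the address and length before the generation word;
		 * the wmb() below keeps the hardware from seeing a valid
		 * gen2 ahead of the fields it guards.
		 */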
847 | wmb(); |
848 | e->gen2 = V_CMD_GEN2(q->genbit); |
849 | |
850 | e++; |
851 | ce++; |
852 | if (++q->pidx == q->size) { |
853 | q->pidx = 0; |
854 | q->genbit ^= 1; |
855 | ce = q->centries; |
856 | e = q->entries; |
857 | } |
858 | q->credits++; |
859 | } |
860 | } |
861 | |
862 | /* |
863 | * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 |
864 | * of both rings, we go into 'few interrupt mode' in order to give the system |
865 | * time to free up resources. |
866 | */ |
867 | static void freelQs_empty(struct sge *sge) |
868 | { |
869 | struct adapter *adapter = sge->adapter; |
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
871 | u32 irqholdoff_reg; |
872 | |
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);
875 | |
876 | if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && |
877 | sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { |
878 | irq_reg |= F_FL_EXHAUSTED; |
879 | irqholdoff_reg = sge->fixed_intrtimer; |
880 | } else { |
881 | /* Clear the F_FL_EXHAUSTED interrupts for now */ |
882 | irq_reg &= ~F_FL_EXHAUSTED; |
883 | irqholdoff_reg = sge->intrtimer_nres; |
884 | } |
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
887 | |
888 | /* We reenable the Qs to force a freelist GTS interrupt later */ |
889 | doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); |
890 | } |
891 | |
892 | #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) |
893 | #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) |
894 | #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ |
895 | F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) |
896 | |
897 | /* |
898 | * Disable SGE Interrupts |
899 | */ |
900 | void t1_sge_intr_disable(struct sge *sge) |
901 | { |
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
903 | |
	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
906 | } |
907 | |
908 | /* |
909 | * Enable SGE interrupts. |
910 | */ |
911 | void t1_sge_intr_enable(struct sge *sge) |
912 | { |
913 | u32 en = SGE_INT_ENABLE; |
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
915 | |
916 | if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) |
917 | en &= ~F_PACKET_TOO_BIG; |
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
920 | } |
921 | |
922 | /* |
923 | * Clear SGE interrupts. |
924 | */ |
925 | void t1_sge_intr_clear(struct sge *sge) |
926 | { |
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
929 | } |
930 | |
931 | /* |
932 | * SGE 'Error' interrupt handler |
933 | */ |
934 | bool t1_sge_intr_error_handler(struct sge *sge) |
935 | { |
936 | struct adapter *adapter = sge->adapter; |
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
938 | bool wake = false; |
939 | |
940 | if (adapter->port[0].dev->hw_features & NETIF_F_TSO) |
941 | cause &= ~F_PACKET_TOO_BIG; |
942 | if (cause & F_RESPQ_EXHAUSTED) |
943 | sge->stats.respQ_empty++; |
944 | if (cause & F_RESPQ_OVERFLOW) { |
945 | sge->stats.respQ_overflow++; |
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
948 | } |
949 | if (cause & F_FL_EXHAUSTED) { |
950 | sge->stats.freelistQ_empty++; |
951 | freelQs_empty(sge); |
952 | } |
953 | if (cause & F_PACKET_TOO_BIG) { |
954 | sge->stats.pkt_too_big++; |
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
957 | } |
958 | if (cause & F_PACKET_MISMATCH) { |
959 | sge->stats.pkt_mismatch++; |
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
961 | } |
962 | if (cause & SGE_INT_FATAL) { |
963 | t1_interrupts_disable(adapter); |
964 | adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR; |
965 | wake = true; |
966 | } |
967 | |
	writel(cause, adapter->regs + A_SG_INT_CAUSE);
969 | return wake; |
970 | } |
971 | |
972 | const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) |
973 | { |
974 | return &sge->stats; |
975 | } |
976 | |
977 | void t1_sge_get_port_stats(const struct sge *sge, int port, |
978 | struct sge_port_stats *ss) |
979 | { |
980 | int cpu; |
981 | |
982 | memset(ss, 0, sizeof(*ss)); |
983 | for_each_possible_cpu(cpu) { |
984 | struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); |
985 | |
986 | ss->rx_cso_good += st->rx_cso_good; |
987 | ss->tx_cso += st->tx_cso; |
988 | ss->tx_tso += st->tx_tso; |
989 | ss->tx_need_hdrroom += st->tx_need_hdrroom; |
990 | ss->vlan_xtract += st->vlan_xtract; |
991 | ss->vlan_insert += st->vlan_insert; |
992 | } |
993 | } |
994 | |
995 | /** |
996 | * recycle_fl_buf - recycle a free list buffer |
997 | * @fl: the free list |
998 | * @idx: index of buffer to recycle |
999 | * |
1000 | * Recycles the specified buffer on the given free list by adding it at |
1001 | * the next available slot on the list. |
1002 | */ |
1003 | static void recycle_fl_buf(struct freelQ *fl, int idx) |
1004 | { |
1005 | struct freelQ_e *from = &fl->entries[idx]; |
1006 | struct freelQ_e *to = &fl->entries[fl->pidx]; |
1007 | |
1008 | fl->centries[fl->pidx] = fl->centries[idx]; |
1009 | to->addr_lo = from->addr_lo; |
1010 | to->addr_hi = from->addr_hi; |
1011 | to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); |
1012 | wmb(); |
1013 | to->gen2 = V_CMD_GEN2(fl->genbit); |
1014 | fl->credits++; |
1015 | |
1016 | if (++fl->pidx == fl->size) { |
1017 | fl->pidx = 0; |
1018 | fl->genbit ^= 1; |
1019 | } |
1020 | } |
1021 | |
1022 | static int copybreak __read_mostly = 256; |
1023 | module_param(copybreak, int, 0); |
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1025 | |
1026 | /** |
1027 | * get_packet - return the next ingress packet buffer |
1028 | * @adapter: the adapter that received the packet |
1029 | * @fl: the SGE free list holding the packet |
1030 | * @len: the actual packet length, excluding any SGE padding |
1031 | * |
1032 | * Get the next packet from a free list and complete setup of the |
1033 | * sk_buff. If the packet is small we make a copy and recycle the |
1034 | * original buffer, otherwise we use the original buffer itself. If a |
1035 | * positive drop threshold is supplied packets are dropped and their |
1036 | * buffers recycled if (a) the number of remaining buffers is under the |
1037 | * threshold and the packet is too big to copy, or (b) the packet should |
1038 | * be copied but there is no memory for the copy. |
1039 | */ |
1040 | static inline struct sk_buff *get_packet(struct adapter *adapter, |
1041 | struct freelQ *fl, unsigned int len) |
1042 | { |
1043 | const struct freelQ_ce *ce = &fl->centries[fl->cidx]; |
1044 | struct pci_dev *pdev = adapter->pdev; |
1045 | struct sk_buff *skb; |
1046 | |
1047 | if (len < copybreak) { |
		skb = napi_alloc_skb(&adapter->napi, len);
1049 | if (!skb) |
1050 | goto use_orig_buf; |
1051 | |
1052 | skb_put(skb, len); |
		dma_sync_single_for_cpu(&pdev->dev,
					dma_unmap_addr(ce, dma_addr),
					dma_unmap_len(ce, dma_len),
					DMA_FROM_DEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		dma_sync_single_for_device(&pdev->dev,
					   dma_unmap_addr(ce, dma_addr),
					   dma_unmap_len(ce, dma_len),
					   DMA_FROM_DEVICE);
		recycle_fl_buf(fl, fl->cidx);
1063 | return skb; |
1064 | } |
1065 | |
1066 | use_orig_buf: |
1067 | if (fl->credits < 2) { |
		recycle_fl_buf(fl, fl->cidx);
1069 | return NULL; |
1070 | } |
1071 | |
1072 | dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr), |
1073 | dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE); |
1074 | skb = ce->skb; |
1075 | prefetch(skb->data); |
1076 | |
1077 | skb_put(skb, len); |
1078 | return skb; |
1079 | } |
1080 | |
1081 | /** |
1082 | * unexpected_offload - handle an unexpected offload packet |
1083 | * @adapter: the adapter |
1084 | * @fl: the free list that received the packet |
1085 | * |
1086 | * Called when we receive an unexpected offload packet (e.g., the TOE |
1087 | * function is disabled or the card is a NIC). Prints a message and |
1088 | * recycles the buffer. |
1089 | */ |
1090 | static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) |
1091 | { |
1092 | struct freelQ_ce *ce = &fl->centries[fl->cidx]; |
1093 | struct sk_buff *skb = ce->skb; |
1094 | |
	dma_sync_single_for_cpu(&adapter->pdev->dev,
				dma_unmap_addr(ce, dma_addr),
				dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
1099 | adapter->name, *skb->data); |
	recycle_fl_buf(fl, fl->cidx);
1101 | } |
1102 | |
1103 | /* |
1104 | * T1/T2 SGE limits the maximum DMA size per TX descriptor to |
1105 | * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the |
1106 | * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. |
1107 | * Note that the *_large_page_tx_descs stuff will be optimized out when |
1108 | * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. |
1109 | * |
 * compute_large_page_tx_descs() computes how many additional descriptors are
1111 | * required to break down the stack's request. |
1112 | */ |
1113 | static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) |
1114 | { |
1115 | unsigned int count = 0; |
1116 | |
1117 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { |
1118 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; |
1119 | unsigned int i, len = skb_headlen(skb); |
1120 | while (len > SGE_TX_DESC_MAX_PLEN) { |
1121 | count++; |
1122 | len -= SGE_TX_DESC_MAX_PLEN; |
1123 | } |
1124 | for (i = 0; nfrags--; i++) { |
1125 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1126 | len = skb_frag_size(frag); |
1127 | while (len > SGE_TX_DESC_MAX_PLEN) { |
1128 | count++; |
1129 | len -= SGE_TX_DESC_MAX_PLEN; |
1130 | } |
1131 | } |
1132 | } |
1133 | return count; |
1134 | } |
1135 | |
1136 | /* |
1137 | * Write a cmdQ entry. |
1138 | * |
1139 | * Since this function writes the 'flags' field, it must not be used to |
1140 | * write the first cmdQ entry. |
1141 | */ |
1142 | static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, |
1143 | unsigned int len, unsigned int gen, |
1144 | unsigned int eop) |
1145 | { |
1146 | BUG_ON(len > SGE_TX_DESC_MAX_PLEN); |
1147 | |
1148 | e->addr_lo = (u32)mapping; |
1149 | e->addr_hi = (u64)mapping >> 32; |
1150 | e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); |
1151 | e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); |
1152 | } |
1153 | |
1154 | /* |
1155 | * See comment for previous function. |
1156 | * |
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
1158 | * *desc_len exceeds HW's capability. |
1159 | */ |
1160 | static inline unsigned int write_large_page_tx_descs(unsigned int pidx, |
1161 | struct cmdQ_e **e, |
1162 | struct cmdQ_ce **ce, |
1163 | unsigned int *gen, |
1164 | dma_addr_t *desc_mapping, |
1165 | unsigned int *desc_len, |
1166 | unsigned int nfrags, |
1167 | struct cmdQ *q) |
1168 | { |
1169 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { |
1170 | struct cmdQ_e *e1 = *e; |
1171 | struct cmdQ_ce *ce1 = *ce; |
1172 | |
1173 | while (*desc_len > SGE_TX_DESC_MAX_PLEN) { |
1174 | *desc_len -= SGE_TX_DESC_MAX_PLEN; |
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
1177 | ce1->skb = NULL; |
1178 | dma_unmap_len_set(ce1, dma_len, 0); |
1179 | *desc_mapping += SGE_TX_DESC_MAX_PLEN; |
1180 | if (*desc_len) { |
1181 | ce1++; |
1182 | e1++; |
1183 | if (++pidx == q->size) { |
1184 | pidx = 0; |
1185 | *gen ^= 1; |
1186 | ce1 = q->centries; |
1187 | e1 = q->entries; |
1188 | } |
1189 | } |
1190 | } |
1191 | *e = e1; |
1192 | *ce = ce1; |
1193 | } |
1194 | return pidx; |
1195 | } |
1196 | |
1197 | /* |
1198 | * Write the command descriptors to transmit the given skb starting at |
1199 | * descriptor pidx with the given generation. |
1200 | */ |
1201 | static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, |
1202 | unsigned int pidx, unsigned int gen, |
1203 | struct cmdQ *q) |
1204 | { |
1205 | dma_addr_t mapping, desc_mapping; |
1206 | struct cmdQ_e *e, *e1; |
1207 | struct cmdQ_ce *ce; |
1208 | unsigned int i, flags, first_desc_len, desc_len, |
1209 | nfrags = skb_shinfo(skb)->nr_frags; |
1210 | |
1211 | e = e1 = &q->entries[pidx]; |
1212 | ce = &q->centries[pidx]; |
1213 | |
1214 | mapping = dma_map_single(&adapter->pdev->dev, skb->data, |
1215 | skb_headlen(skb), DMA_TO_DEVICE); |
1216 | |
1217 | desc_mapping = mapping; |
1218 | desc_len = skb_headlen(skb); |
1219 | |
1220 | flags = F_CMD_DATAVALID | F_CMD_SOP | |
1221 | V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | |
1222 | V_CMD_GEN2(gen); |
1223 | first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? |
1224 | desc_len : SGE_TX_DESC_MAX_PLEN; |
1225 | e->addr_lo = (u32)desc_mapping; |
1226 | e->addr_hi = (u64)desc_mapping >> 32; |
1227 | e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); |
1228 | ce->skb = NULL; |
1229 | dma_unmap_len_set(ce, dma_len, 0); |
1230 | |
1231 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && |
1232 | desc_len > SGE_TX_DESC_MAX_PLEN) { |
1233 | desc_mapping += first_desc_len; |
1234 | desc_len -= first_desc_len; |
1235 | e1++; |
1236 | ce++; |
1237 | if (++pidx == q->size) { |
1238 | pidx = 0; |
1239 | gen ^= 1; |
1240 | e1 = q->entries; |
1241 | ce = q->centries; |
1242 | } |
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
1246 | |
1247 | if (likely(desc_len)) |
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
1250 | } |
1251 | |
1252 | ce->skb = NULL; |
1253 | dma_unmap_addr_set(ce, dma_addr, mapping); |
1254 | dma_unmap_len_set(ce, dma_len, skb_headlen(skb)); |
1255 | |
1256 | for (i = 0; nfrags--; i++) { |
1257 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1258 | e1++; |
1259 | ce++; |
1260 | if (++pidx == q->size) { |
1261 | pidx = 0; |
1262 | gen ^= 1; |
1263 | e1 = q->entries; |
1264 | ce = q->centries; |
1265 | } |
1266 | |
		mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
1269 | desc_mapping = mapping; |
1270 | desc_len = skb_frag_size(frag); |
1271 | |
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
1275 | if (likely(desc_len)) |
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
1278 | ce->skb = NULL; |
1279 | dma_unmap_addr_set(ce, dma_addr, mapping); |
1280 | dma_unmap_len_set(ce, dma_len, skb_frag_size(frag)); |
1281 | } |
1282 | ce->skb = skb; |
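	/*
	 * The first descriptor's flags word (carrying SOP and the generation
	 * bit) is written last, after the barrier below, so the hardware
	 * cannot start fetching the chain before every descriptor in it has
	 * been populated.
	 */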
1283 | wmb(); |
1284 | e->flags = flags; |
1285 | } |
1286 | |
1287 | /* |
1288 | * Clean up completed Tx buffers. |
1289 | */ |
1290 | static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) |
1291 | { |
1292 | unsigned int reclaim = q->processed - q->cleaned; |
1293 | |
1294 | if (reclaim) { |
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1296 | q->processed, q->cleaned); |
		free_cmdQ_buffers(sge, q, reclaim);
1298 | q->cleaned += reclaim; |
1299 | } |
1300 | } |
1301 | |
1302 | /* |
1303 | * Called from tasklet. Checks the scheduler for any |
1304 | * pending skbs that can be sent. |
1305 | */ |
1306 | static void restart_sched(struct tasklet_struct *t) |
1307 | { |
1308 | struct sched *s = from_tasklet(s, t, sched_tsk); |
1309 | struct sge *sge = s->sge; |
1310 | struct adapter *adapter = sge->adapter; |
1311 | struct cmdQ *q = &sge->cmdQ[0]; |
1312 | struct sk_buff *skb; |
1313 | unsigned int credits, queued_skb = 0; |
1314 | |
	spin_lock(&q->lock);
1316 | reclaim_completed_tx(sge, q); |
1317 | |
1318 | credits = q->size - q->in_use; |
	pr_debug("restart_sched credits=%d\n", credits);
1320 | while ((skb = sched_skb(sge, NULL, credits)) != NULL) { |
1321 | unsigned int genbit, pidx, count; |
1322 | count = 1 + skb_shinfo(skb)->nr_frags; |
1323 | count += compute_large_page_tx_descs(skb); |
1324 | q->in_use += count; |
1325 | genbit = q->genbit; |
1326 | pidx = q->pidx; |
1327 | q->pidx += count; |
1328 | if (q->pidx >= q->size) { |
1329 | q->pidx -= q->size; |
1330 | q->genbit ^= 1; |
1331 | } |
		write_tx_descs(adapter, skb, pidx, genbit, q);
1333 | credits = q->size - q->in_use; |
1334 | queued_skb = 1; |
1335 | } |
1336 | |
1337 | if (queued_skb) { |
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1342 | } |
1343 | } |
	spin_unlock(&q->lock);
1345 | } |
1346 | |
1347 | /** |
1348 | * sge_rx - process an ingress ethernet packet |
1349 | * @sge: the sge structure |
1350 | * @fl: the free list that contains the packet buffer |
1351 | * @len: the packet length |
1352 | * |
1353 | * Process an ingress ethernet packet and deliver it to the stack. |
1354 | */ |
1355 | static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) |
1356 | { |
1357 | struct sk_buff *skb; |
1358 | const struct cpl_rx_pkt *p; |
1359 | struct adapter *adapter = sge->adapter; |
1360 | struct sge_port_stats *st; |
1361 | struct net_device *dev; |
1362 | |
	skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
1364 | if (unlikely(!skb)) { |
1365 | sge->stats.rx_drops++; |
1366 | return; |
1367 | } |
1368 | |
1369 | p = (const struct cpl_rx_pkt *) skb->data; |
1370 | if (p->iff >= adapter->params.nports) { |
1371 | kfree_skb(skb); |
1372 | return; |
1373 | } |
	__skb_pull(skb, sizeof(*p));
1375 | |
1376 | st = this_cpu_ptr(sge->port_stats[p->iff]); |
1377 | dev = adapter->port[p->iff].dev; |
1378 | |
1379 | skb->protocol = eth_type_trans(skb, dev); |
1380 | if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff && |
1381 | skb->protocol == htons(ETH_P_IP) && |
1382 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { |
1383 | ++st->rx_cso_good; |
1384 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1385 | } else |
1386 | skb_checksum_none_assert(skb); |
1387 | |
1388 | if (p->vlan_valid) { |
1389 | st->vlan_xtract++; |
1390 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); |
1391 | } |
1392 | netif_receive_skb(skb); |
1393 | } |
1394 | |
1395 | /* |
1396 | * Returns true if a command queue has enough available descriptors that |
1397 | * we can resume Tx operation after temporarily disabling its packet queue. |
1398 | */ |
1399 | static inline int enough_free_Tx_descs(const struct cmdQ *q) |
1400 | { |
1401 | unsigned int r = q->processed - q->cleaned; |
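	/*
	 * r counts descriptors the hardware has retired but software has not
	 * reclaimed yet; only q->in_use - r are still genuinely outstanding.
	 */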
1402 | |
1403 | return q->in_use - r < (q->size >> 1); |
1404 | } |
1405 | |
1406 | /* |
1407 | * Called when sufficient space has become available in the SGE command queues |
1408 | * after the Tx packet schedulers have been suspended to restart the Tx path. |
1409 | */ |
1410 | static void restart_tx_queues(struct sge *sge) |
1411 | { |
1412 | struct adapter *adap = sge->adapter; |
1413 | int i; |
1414 | |
	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1416 | return; |
1417 | |
1418 | for_each_port(adap, i) { |
1419 | struct net_device *nd = adap->port[i].dev; |
1420 | |
		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
1423 | sge->stats.cmdQ_restarted[2]++; |
			netif_wake_queue(nd);
1425 | } |
1426 | } |
1427 | } |
1428 | |
1429 | /* |
1430 | * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 |
1431 | * information. |
1432 | */ |
1433 | static unsigned int update_tx_info(struct adapter *adapter, |
1434 | unsigned int flags, |
1435 | unsigned int pr0) |
1436 | { |
1437 | struct sge *sge = adapter->sge; |
1438 | struct cmdQ *cmdq = &sge->cmdQ[0]; |
1439 | |
1440 | cmdq->processed += pr0; |
1441 | if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { |
1442 | freelQs_empty(sge); |
1443 | flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); |
1444 | } |
1445 | if (flags & F_CMDQ0_ENABLE) { |
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1447 | |
1448 | if (cmdq->cleaned + cmdq->in_use != cmdq->processed && |
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1452 | } |
1453 | if (sge->tx_sched) |
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1455 | |
1456 | flags &= ~F_CMDQ0_ENABLE; |
1457 | } |
1458 | |
1459 | if (unlikely(sge->stopped_tx_queues != 0)) |
1460 | restart_tx_queues(sge); |
1461 | |
1462 | return flags; |
1463 | } |
1464 | |
1465 | /* |
1466 | * Process SGE responses, up to the supplied budget. Returns the number of |
 * responses processed; the budget bounds the amount of work done per call.
1468 | */ |
1469 | static int process_responses(struct adapter *adapter, int budget) |
1470 | { |
1471 | struct sge *sge = adapter->sge; |
1472 | struct respQ *q = &sge->respQ; |
1473 | struct respQ_e *e = &q->entries[q->cidx]; |
1474 | int done = 0; |
1475 | unsigned int flags = 0; |
1476 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; |
1477 | |
1478 | while (done < budget && e->GenerationBit == q->genbit) { |
1479 | flags |= e->Qsleeping; |
1480 | |
1481 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1482 | cmdq_processed[1] += e->Cmdq1CreditReturn; |
1483 | |
1484 | /* We batch updates to the TX side to avoid cacheline |
1485 | * ping-pong of TX state information on MP where the sender |
1486 | * might run on a different CPU than this function... |
1487 | */ |
1488 | if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { |
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1490 | cmdq_processed[0] = 0; |
1491 | } |
1492 | |
1493 | if (unlikely(cmdq_processed[1] > 16)) { |
1494 | sge->cmdQ[1].processed += cmdq_processed[1]; |
1495 | cmdq_processed[1] = 0; |
1496 | } |
1497 | |
1498 | if (likely(e->DataValid)) { |
1499 | struct freelQ *fl = &sge->freelQ[e->FreelistQid]; |
1500 | |
1501 | BUG_ON(!e->Sop || !e->Eop); |
1502 | if (unlikely(e->Offload)) |
1503 | unexpected_offload(adapter, fl); |
1504 | else |
				sge_rx(sge, fl, e->BufferLength);
1506 | |
1507 | ++done; |
1508 | |
1509 | /* |
1510 | * Note: this depends on each packet consuming a |
1511 | * single free-list buffer; cf. the BUG above. |
1512 | */ |
1513 | if (++fl->cidx == fl->size) |
1514 | fl->cidx = 0; |
1515 | prefetch(fl->centries[fl->cidx].skb); |
1516 | |
1517 | if (unlikely(--fl->credits < |
1518 | fl->size - SGE_FREEL_REFILL_THRESH)) |
				refill_free_list(sge, fl);
1520 | } else |
1521 | sge->stats.pure_rsps++; |
1522 | |
1523 | e++; |
1524 | if (unlikely(++q->cidx == q->size)) { |
1525 | q->cidx = 0; |
1526 | q->genbit ^= 1; |
1527 | e = q->entries; |
1528 | } |
1529 | prefetch(e); |
1530 | |
1531 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { |
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1533 | q->credits = 0; |
1534 | } |
1535 | } |
1536 | |
	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1538 | sge->cmdQ[1].processed += cmdq_processed[1]; |
1539 | |
1540 | return done; |
1541 | } |
1542 | |
1543 | static inline int responses_pending(const struct adapter *adapter) |
1544 | { |
1545 | const struct respQ *Q = &adapter->sge->respQ; |
1546 | const struct respQ_e *e = &Q->entries[Q->cidx]; |
1547 | |
1548 | return e->GenerationBit == Q->genbit; |
1549 | } |
1550 | |
1551 | /* |
1552 | * A simpler version of process_responses() that handles only pure (i.e., |
 * non data-carrying) responses. Such responses are too lightweight to justify
 * calling a softirq when using NAPI, so we handle them specially in hard
 * interrupt context. The caller must ensure that at least one response is
 * pending before calling. Returns 1 if it encounters a valid data-carrying
 * response, 0 otherwise.
1558 | */ |
1559 | static int process_pure_responses(struct adapter *adapter) |
1560 | { |
1561 | struct sge *sge = adapter->sge; |
1562 | struct respQ *q = &sge->respQ; |
1563 | struct respQ_e *e = &q->entries[q->cidx]; |
1564 | const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; |
1565 | unsigned int flags = 0; |
1566 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; |
1567 | |
1568 | prefetch(fl->centries[fl->cidx].skb); |
1569 | if (e->DataValid) |
1570 | return 1; |
1571 | |
1572 | do { |
1573 | flags |= e->Qsleeping; |
1574 | |
1575 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1576 | cmdq_processed[1] += e->Cmdq1CreditReturn; |
1577 | |
1578 | e++; |
1579 | if (unlikely(++q->cidx == q->size)) { |
1580 | q->cidx = 0; |
1581 | q->genbit ^= 1; |
1582 | e = q->entries; |
1583 | } |
1584 | prefetch(e); |
1585 | |
1586 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { |
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1588 | q->credits = 0; |
1589 | } |
1590 | sge->stats.pure_rsps++; |
1591 | } while (e->GenerationBit == q->genbit && !e->DataValid); |
1592 | |
	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1594 | sge->cmdQ[1].processed += cmdq_processed[1]; |
1595 | |
1596 | return e->GenerationBit == q->genbit; |
1597 | } |
1598 | |
1599 | /* |
1600 | * Handler for new data events when using NAPI. This does not need any locking |
1601 | * or protection from interrupts as data interrupts are off at this point and |
1602 | * other adapter interrupts do not interfere. |
1603 | */ |
1604 | int t1_poll(struct napi_struct *napi, int budget) |
1605 | { |
1606 | struct adapter *adapter = container_of(napi, struct adapter, napi); |
1607 | int work_done = process_responses(adapter, budget); |
1608 | |
1609 | if (likely(work_done < budget)) { |
		napi_complete_done(napi, work_done);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
1613 | } |
1614 | return work_done; |
1615 | } |
1616 | |
1617 | irqreturn_t t1_interrupt_thread(int irq, void *data) |
1618 | { |
1619 | struct adapter *adapter = data; |
1620 | u32 pending_thread_intr; |
1621 | |
	spin_lock_irq(&adapter->async_lock);
1623 | pending_thread_intr = adapter->pending_thread_intr; |
1624 | adapter->pending_thread_intr = 0; |
	spin_unlock_irq(&adapter->async_lock);
1626 | |
1627 | if (!pending_thread_intr) |
1628 | return IRQ_NONE; |
1629 | |
1630 | if (pending_thread_intr & F_PL_INTR_EXT) |
1631 | t1_elmer0_ext_intr_handler(adapter); |
1632 | |
1633 | /* This error is fatal, interrupts remain off */ |
1634 | if (pending_thread_intr & F_PL_INTR_SGE_ERR) { |
		pr_alert("%s: encountered fatal error, operation suspended\n",
			 adapter->name);
1637 | t1_sge_stop(adapter->sge); |
1638 | return IRQ_HANDLED; |
1639 | } |
1640 | |
	spin_lock_irq(&adapter->async_lock);
1642 | adapter->slow_intr_mask |= F_PL_INTR_EXT; |
1643 | |
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
1648 | |
1649 | return IRQ_HANDLED; |
1650 | } |
1651 | |
1652 | irqreturn_t t1_interrupt(int irq, void *data) |
1653 | { |
1654 | struct adapter *adapter = data; |
1655 | struct sge *sge = adapter->sge; |
1656 | irqreturn_t handled; |
1657 | |
1658 | if (likely(responses_pending(adapter))) { |
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1660 | |
		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
1669 | } |
1670 | } |
1671 | return IRQ_HANDLED; |
1672 | } |
1673 | |
1674 | spin_lock(&adapter->async_lock);
1675 | handled = t1_slow_intr_handler(adapter);
1676 | spin_unlock(&adapter->async_lock);
1677 | |
1678 | if (handled == IRQ_NONE) |
1679 | sge->stats.unhandled_irqs++; |
1680 | |
1681 | return handled; |
1682 | } |
1683 | |
1684 | /* |
1685 | * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. |
1686 | * |
1687 | * The code figures out how many entries the sk_buff will require in the |
1688 | * cmdQ and updates the cmdQ data structure with the state once the enqueue |
1689 |  * has completed. After that it no longer accesses the global structure, but
1690 | * uses the corresponding fields on the stack. In conjunction with a spinlock |
1691 | * around that code, we can make the function reentrant without holding the |
1692 | * lock when we actually enqueue (which might be expensive, especially on |
1693 | * architectures with IO MMUs). |
1694 | * |
1695 | * This runs with softirqs disabled. |
1696 | */ |
1697 | static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, |
1698 | unsigned int qid, struct net_device *dev) |
1699 | { |
1700 | struct sge *sge = adapter->sge; |
1701 | struct cmdQ *q = &sge->cmdQ[qid]; |
1702 | unsigned int credits, pidx, genbit, count, use_sched_skb = 0; |
1703 | |
1704 | spin_lock(&q->lock);
1705 | |
1706 | reclaim_completed_tx(sge, q); |
1707 | |
1708 | pidx = q->pidx; |
1709 | credits = q->size - q->in_use; |
1710 | count = 1 + skb_shinfo(skb)->nr_frags; |
1711 | count += compute_large_page_tx_descs(skb); |
1712 | |
1713 | /* Ethernet packet */ |
1714 | if (unlikely(credits < count)) { |
1715 | if (!netif_queue_stopped(dev)) { |
1716 | netif_stop_queue(dev); |
1717 | set_bit(dev->if_port, &sge->stopped_tx_queues);
1718 | sge->stats.cmdQ_full[2]++;
1719 | pr_err("%s: Tx ring full while queue awake!\n",
1720 | adapter->name);
1721 | }
1722 | spin_unlock(&q->lock);
1723 | return NETDEV_TX_BUSY; |
1724 | } |
1725 | |
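/* Stop the queue early when this packet would leave fewer free descriptors
 * than the stop threshold, so it can be restarted once completed
 * descriptors are reclaimed.
 */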
1726 | if (unlikely(credits - count < q->stop_thres)) { |
1727 | netif_stop_queue(dev); |
1728 | set_bit(dev->if_port, &sge->stopped_tx_queues);
1729 | sge->stats.cmdQ_full[2]++; |
1730 | } |
1731 | |
1732 | /* T204 cmdQ0 skbs that are destined for a certain port have to go |
1733 | * through the scheduler. |
1734 | */ |
1735 | if (sge->tx_sched && !qid && skb->dev) { |
1736 | use_sched: |
1737 | use_sched_skb = 1; |
1738 | /* Note that the scheduler might return a different skb than |
1739 | * the one passed in. |
1740 | */ |
1741 | skb = sched_skb(sge, skb, credits); |
1742 | if (!skb) { |
1743 | spin_unlock(&q->lock);
1744 | return NETDEV_TX_OK; |
1745 | } |
1746 | pidx = q->pidx; |
1747 | count = 1 + skb_shinfo(skb)->nr_frags; |
1748 | count += compute_large_page_tx_descs(skb); |
1749 | } |
1750 | |
1751 | q->in_use += count; |
1752 | genbit = q->genbit; |
1753 | pidx = q->pidx; |
1754 | q->pidx += count; |
1755 | if (q->pidx >= q->size) { |
1756 | q->pidx -= q->size; |
1757 | q->genbit ^= 1; |
1758 | } |
1759 | spin_unlock(&q->lock);
1760 | 
1761 | write_tx_descs(adapter, skb, pidx, genbit, q);
1762 | |
1763 | /* |
1764 | * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring |
1765 |  * the doorbell if the Q is asleep. There is a natural race where the
1766 |  * hardware goes to sleep just after we check; in that case the
1767 |  * interrupt handler will detect the outstanding TX packet and ring
1768 |  * the doorbell for us.
1769 | */ |
1770 | if (qid) |
1771 | doorbell_pio(adapter, F_CMDQ1_ENABLE); |
1772 | else { |
1773 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1774 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1775 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1776 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1777 | } |
1778 | } |
1779 | |
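/* If the packet came from the packet scheduler, try to drain any further
 * skbs the scheduler has queued, but only if we can retake the queue lock
 * without spinning.
 */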
1780 | if (use_sched_skb) { |
1781 | if (spin_trylock(&q->lock)) {
1782 | credits = q->size - q->in_use; |
1783 | skb = NULL; |
1784 | goto use_sched; |
1785 | } |
1786 | } |
1787 | return NETDEV_TX_OK; |
1788 | } |
1789 | |
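/*
 * Pack the Ethernet encapsulation type and the MSS into the 16-bit
 * eth_type_mss field of the LSO CPL header: type in the top two bits,
 * MSS in the low 14 bits.
 */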
1790 | #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) |
1791 | |
1792 | /* |
1793 | * eth_hdr_len - return the length of an Ethernet header |
1794 | * @data: pointer to the start of the Ethernet header |
1795 | * |
1796 | * Returns the length of an Ethernet header, including optional VLAN tag. |
1797 | */ |
1798 | static inline int eth_hdr_len(const void *data) |
1799 | { |
1800 | const struct ethhdr *e = data; |
1801 | |
1802 | return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; |
1803 | } |
1804 | |
1805 | /* |
1806 | * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. |
1807 | */ |
1808 | netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1809 | { |
1810 | struct adapter *adapter = dev->ml_priv; |
1811 | struct sge *sge = adapter->sge; |
1812 | struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); |
1813 | struct cpl_tx_pkt *cpl; |
1814 | struct sk_buff *orig_skb = skb; |
1815 | int ret; |
1816 | |
1817 | if (skb->protocol == htons(ETH_P_CPL5)) |
1818 | goto send; |
1819 | |
1820 | /* |
1821 | * We are using a non-standard hard_header_len. |
1822 | * Allocate more header room in the rare cases it is not big enough. |
1823 | */ |
1824 | if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { |
1825 | skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1826 | ++st->tx_need_hdrroom;
1827 | dev_kfree_skb_any(orig_skb);
1828 | if (!skb) |
1829 | return NETDEV_TX_OK; |
1830 | } |
1831 | |
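/* LSO: prepend a cpl_tx_pkt_lso header describing the IP and TCP header
 * sizes and the MSS so the hardware can segment the payload.
 */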
1832 | if (skb_shinfo(skb)->gso_size) { |
1833 | int eth_type; |
1834 | struct cpl_tx_pkt_lso *hdr; |
1835 | |
1836 | ++st->tx_tso; |
1837 | |
1838 | eth_type = skb_network_offset(skb) == ETH_HLEN ? |
1839 | CPL_ETH_II : CPL_ETH_II_VLAN; |
1840 | |
1841 | hdr = skb_push(skb, sizeof(*hdr));
1842 | hdr->opcode = CPL_TX_PKT_LSO; |
1843 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; |
1844 | hdr->ip_hdr_words = ip_hdr(skb)->ihl; |
1845 | hdr->tcp_hdr_words = tcp_hdr(skb)->doff; |
1846 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, |
1847 | skb_shinfo(skb)->gso_size)); |
1848 | hdr->len = htonl(skb->len - sizeof(*hdr)); |
1849 | cpl = (struct cpl_tx_pkt *)hdr; |
1850 | } else { |
1851 | /*
1852 |  * Packets shorter than ETH_HLEN can break the MAC, so drop
1853 |  * them early. We may also get oversized packets because some
1854 |  * parts of the kernel don't handle our unusual hard_header_len
1855 |  * correctly; drop those too.
1856 |  */
1857 | if (unlikely(skb->len < ETH_HLEN || |
1858 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { |
1859 | netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
1860 | skb->len, eth_hdr_len(skb->data), dev->mtu); |
1861 | dev_kfree_skb_any(skb); |
1862 | return NETDEV_TX_OK; |
1863 | } |
1864 | |
1865 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
1866 | ip_hdr(skb)->protocol == IPPROTO_UDP) { |
1867 | if (unlikely(skb_checksum_help(skb))) { |
1868 | netdev_dbg(dev, "unable to do udp checksum\n");
1869 | dev_kfree_skb_any(skb); |
1870 | return NETDEV_TX_OK; |
1871 | } |
1872 | } |
1873 | |
1874 | /* Try to catch the gratuitous ARP here; we reuse it later to
1875 |  * flush out stuck ESPI packets.
1876 |  */
1877 | if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { |
1878 | if (skb->protocol == htons(ETH_P_ARP) && |
1879 | arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { |
1880 | adapter->sge->espibug_skb[dev->if_port] = skb; |
1881 | /* We want to re-use this skb later. We |
1882 | * simply bump the reference count and it |
1883 | * will not be freed... |
1884 | */ |
1885 | skb = skb_get(skb); |
1886 | } |
1887 | } |
1888 | |
1889 | cpl = __skb_push(skb, sizeof(*cpl));
1890 | cpl->opcode = CPL_TX_PKT; |
1891 | cpl->ip_csum_dis = 1; /* SW calculates IP csum */ |
1892 | cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1; |
1893 | /* the length field isn't used so don't bother setting it */ |
1894 | |
1895 | st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); |
1896 | } |
1897 | cpl->iff = dev->if_port; |
1898 | |
1899 | if (skb_vlan_tag_present(skb)) { |
1900 | cpl->vlan_valid = 1; |
1901 | cpl->vlan = htons(skb_vlan_tag_get(skb)); |
1902 | st->vlan_insert++; |
1903 | } else |
1904 | cpl->vlan_valid = 0; |
1905 | |
1906 | send: |
1907 | ret = t1_sge_tx(skb, adapter, 0, dev);
1908 | |
1909 | /* If transmit busy, and we reallocated skb's due to headroom limit, |
1910 | * then silently discard to avoid leak. |
1911 | */ |
1912 | if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { |
1913 | dev_kfree_skb_any(skb); |
1914 | ret = NETDEV_TX_OK; |
1915 | } |
1916 | return ret; |
1917 | } |
1918 | |
1919 | /* |
1920 | * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. |
1921 | */ |
1922 | static void sge_tx_reclaim_cb(struct timer_list *t) |
1923 | { |
1924 | int i; |
1925 | struct sge *sge = from_timer(sge, t, tx_reclaim_timer); |
1926 | |
1927 | for (i = 0; i < SGE_CMDQ_N; ++i) { |
1928 | struct cmdQ *q = &sge->cmdQ[i]; |
1929 | |
1930 | if (!spin_trylock(&q->lock))
1931 | continue; |
1932 | |
1933 | reclaim_completed_tx(sge, q); |
1934 | if (i == 0 && q->in_use) { /* flush pending credits */ |
1935 | writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1936 | } |
1937 | spin_unlock(&q->lock);
1938 | }
1939 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1940 | } |
1941 | |
1942 | /* |
1943 | * Propagate changes of the SGE coalescing parameters to the HW. |
1944 | */ |
1945 | int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) |
1946 | { |
1947 | sge->fixed_intrtimer = p->rx_coalesce_usecs * |
1948 | core_ticks_per_usec(sge->adapter);
1949 | writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1950 | return 0; |
1951 | } |
1952 | |
1953 | /* |
1954 | * Allocates both RX and TX resources and configures the SGE. However, |
1955 | * the hardware is not enabled yet. |
1956 | */ |
1957 | int t1_sge_configure(struct sge *sge, struct sge_params *p) |
1958 | { |
1959 | if (alloc_rx_resources(sge, p)) |
1960 | return -ENOMEM; |
1961 | if (alloc_tx_resources(sge, p)) { |
1962 | free_rx_resources(sge); |
1963 | return -ENOMEM; |
1964 | } |
1965 | configure_sge(sge, p); |
1966 | |
1967 | /* |
1968 | * Now that we have sized the free lists calculate the payload |
1969 | * capacity of the large buffers. Other parts of the driver use |
1970 | * this to set the max offload coalescing size so that RX packets |
1971 | * do not overflow our large buffers. |
1972 | */ |
1973 | p->large_buf_capacity = jumbo_payload_capacity(sge); |
1974 | return 0; |
1975 | } |
1976 | |
1977 | /* |
1978 | * Disables the DMA engine. |
1979 | */ |
1980 | void t1_sge_stop(struct sge *sge) |
1981 | { |
1982 | int i; |
1983 | writel(0, sge->adapter->regs + A_SG_CONTROL);
1984 | readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1985 | |
1986 | if (is_T2(sge->adapter)) |
1987 | del_timer_sync(&sge->espibug_timer);
1988 | 
1989 | del_timer_sync(&sge->tx_reclaim_timer);
1990 | if (sge->tx_sched) |
1991 | tx_sched_stop(sge); |
1992 | |
1993 | for (i = 0; i < MAX_NPORTS; i++) |
1994 | kfree_skb(sge->espibug_skb[i]);
1995 | } |
1996 | |
1997 | /* |
1998 | * Enables the DMA engine. |
1999 | */ |
2000 | void t1_sge_start(struct sge *sge) |
2001 | { |
2002 | refill_free_list(sge, &sge->freelQ[0]);
2003 | refill_free_list(sge, &sge->freelQ[1]);
2004 | 
2005 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
2006 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
2007 | readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
2008 | 
2009 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2010 | 
2011 | if (is_T2(sge->adapter))
2012 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2013 | } |
2014 | |
2015 | /* |
2016 |  * Callback for the T2 ESPI 'stuck packet feature' workaround.
2017 | */ |
2018 | static void espibug_workaround_t204(struct timer_list *t) |
2019 | { |
2020 | struct sge *sge = from_timer(sge, t, espibug_timer); |
2021 | struct adapter *adapter = sge->adapter; |
2022 | unsigned int nports = adapter->params.nports; |
2023 | u32 seop[MAX_NPORTS]; |
2024 | |
2025 | if (adapter->open_device_map & PORT_MASK) { |
2026 | int i; |
2027 | |
2028 | if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) |
2029 | return; |
2030 | |
2031 | for (i = 0; i < nports; i++) { |
2032 | struct sk_buff *skb = sge->espibug_skb[i]; |
2033 | |
2034 | if (!netif_running(adapter->port[i].dev) ||
2035 | netif_queue_stopped(adapter->port[i].dev) ||
2036 | !seop[i] || ((seop[i] & 0xfff) != 0) || !skb) |
2037 | continue; |
2038 | |
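/* Patch the skb once: write the Chelsio MAC address into the Ethernet
 * header and near the end of the packet, then flag it via cb[0] so it is
 * not patched again.
 */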
2039 | if (!skb->cb[0]) { |
2040 | skb_copy_to_linear_data_offset(skb,
2041 | sizeof(struct cpl_tx_pkt),
2042 | ch_mac_addr,
2043 | ETH_ALEN);
2044 | skb_copy_to_linear_data_offset(skb,
2045 | skb->len - 10,
2046 | ch_mac_addr,
2047 | ETH_ALEN);
2048 | skb->cb[0] = 0xff; |
2049 | } |
2050 | |
2051 | /* bump the reference count to avoid freeing of |
2052 | * the skb once the DMA has completed. |
2053 | */ |
2054 | skb = skb_get(skb); |
2055 | t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2056 | } |
2057 | } |
2058 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2059 | } |
2060 | |
2061 | static void espibug_workaround(struct timer_list *t) |
2062 | { |
2063 | struct sge *sge = from_timer(sge, t, espibug_timer); |
2064 | struct adapter *adapter = sge->adapter; |
2065 | |
2066 | if (netif_running(adapter->port[0].dev)) {
2067 | struct sk_buff *skb = sge->espibug_skb[0];
2068 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
2069 | |
2070 | if ((seop & 0xfff0fff) == 0xfff && skb) { |
2071 | if (!skb->cb[0]) { |
2072 | skb_copy_to_linear_data_offset(skb,
2073 | sizeof(struct cpl_tx_pkt),
2074 | ch_mac_addr,
2075 | ETH_ALEN);
2076 | skb_copy_to_linear_data_offset(skb,
2077 | skb->len - 10,
2078 | ch_mac_addr,
2079 | ETH_ALEN);
2080 | skb->cb[0] = 0xff; |
2081 | } |
2082 | |
2083 | /* bump the reference count to avoid freeing of the |
2084 | * skb once the DMA has completed. |
2085 | */ |
2086 | skb = skb_get(skb); |
2087 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
2088 | } |
2089 | } |
2090 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2091 | } |
2092 | |
2093 | /* |
2094 | * Creates a t1_sge structure and returns suggested resource parameters. |
2095 | */ |
2096 | struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) |
2097 | { |
2098 | struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2099 | int i; |
2100 | |
2101 | if (!sge) |
2102 | return NULL; |
2103 | |
2104 | sge->adapter = adapter; |
2105 | sge->netdev = adapter->port[0].dev; |
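/* T1B keeps jumbo buffers on free list 1 and needs no RX padding; all
 * other chips use free list 0 and a 2-byte RX pad.
 */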
2106 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; |
2107 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; |
2108 | |
2109 | for_each_port(adapter, i) { |
2110 | sge->port_stats[i] = alloc_percpu(struct sge_port_stats); |
2111 | if (!sge->port_stats[i]) |
2112 | goto nomem_port; |
2113 | } |
2114 | |
2115 | timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0); |
2116 | |
2117 | if (is_T2(sge->adapter)) { |
2118 | timer_setup(&sge->espibug_timer, |
2119 | adapter->params.nports > 1 ? espibug_workaround_t204 : espibug_workaround, |
2120 | 0); |
2121 | |
2122 | if (adapter->params.nports > 1) |
2123 | tx_sched_init(sge); |
2124 | |
2125 | sge->espibug_timeout = 1; |
2126 | /* for T204, every 10ms */ |
2127 | if (adapter->params.nports > 1) |
2128 | sge->espibug_timeout = HZ/100; |
2129 | } |
2130 | |
2131 | |
2132 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; |
2133 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; |
2134 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; |
2135 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; |
2136 | if (sge->tx_sched) { |
2137 | if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) |
2138 | p->rx_coalesce_usecs = 15; |
2139 | else |
2140 | p->rx_coalesce_usecs = 50; |
2141 | } else |
2142 | p->rx_coalesce_usecs = 50; |
2143 | |
2144 | p->coalesce_enable = 0; |
2145 | p->sample_interval_usecs = 0; |
2146 | |
2147 | return sge; |
2148 | nomem_port: |
2149 | while (i >= 0) { |
2150 | free_percpu(sge->port_stats[i]);
2151 | --i;
2152 | }
2153 | kfree(sge);
2154 | return NULL; |
2155 | |
2156 | } |
2157 | |