1 | /* |
2 | drivers/net/ethernet/dec/tulip/interrupt.c |
3 | |
4 | Copyright 2000,2001 The Linux Kernel Team |
5 | Written/copyright 1994-2001 by Donald Becker. |
6 | |
7 | This software may be used and distributed according to the terms |
8 | of the GNU General Public License, incorporated herein by reference. |
9 | |
10 | Please submit bugs to http://bugzilla.kernel.org/ . |
11 | */ |
12 | |
13 | #include <linux/pci.h> |
14 | #include "tulip.h" |
15 | #include <linux/etherdevice.h> |
16 | |
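/* Tunables shared with the rest of the driver: packets shorter than
 * tulip_rx_copybreak are copied into a freshly allocated skb rather than
 * having their ring buffer unmapped, and tulip_max_interrupt_work bounds
 * the work done per hardware interrupt (both are normally set from the
 * module parameters in tulip_core.c).
 */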
17 | int tulip_rx_copybreak; |
18 | unsigned int tulip_max_interrupt_work; |
19 | |
20 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION |
21 | #define MIT_SIZE 15 |
22 | #define MIT_TABLE 15 /* We use 0 or max */ |
23 | |
24 | static unsigned int mit_table[MIT_SIZE+1] = |
25 | { |
  /* CSR11 21143 hardware Mitigation Control Interrupt
     We use only RX mitigation; other techniques are used for
     TX intr. mitigation.
29 | |
30 | 31 Cycle Size (timer control) |
31 | 30:27 TX timer in 16 * Cycle size |
32 | 26:24 TX No pkts before Int. |
33 | 23:20 RX timer in Cycle size |
34 | 19:17 RX No pkts before Int. |
35 | 16 Continues Mode (CM) |
36 | */ |
37 | |
38 | 0x0, /* IM disabled */ |
39 | 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */ |
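	/* Example decode of 0x80150000 against the layout above: bit 31 = 1
	   (cycle size), bits 23:20 = 0x1 (RX timer = 1), bits 19:17 = 0x2
	   (RX pkts = 2), bit 16 = 1 (CM = 1). */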
40 | 0x80150000, |
41 | 0x80270000, |
42 | 0x80370000, |
43 | 0x80490000, |
44 | 0x80590000, |
45 | 0x80690000, |
46 | 0x807B0000, |
47 | 0x808B0000, |
48 | 0x809D0000, |
49 | 0x80AD0000, |
50 | 0x80BD0000, |
51 | 0x80CF0000, |
52 | 0x80DF0000, |
53 | // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */ |
54 | 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */ |
55 | }; |
56 | #endif |
57 | |
58 | |
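/* Refill empty slots in the Rx ring with freshly allocated, DMA-mapped
 * skbs and hand those descriptors back to the chip (DescOwned).  Returns
 * the number of buffers refilled; stopping short of cur_rx indicates an
 * allocation or mapping failure, which callers treat as out-of-memory.
 */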
59 | int tulip_refill_rx(struct net_device *dev) |
60 | { |
61 | struct tulip_private *tp = netdev_priv(dev); |
62 | int entry; |
63 | int refilled = 0; |
64 | |
65 | /* Refill the Rx ring buffers. */ |
66 | for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) { |
67 | entry = tp->dirty_rx % RX_RING_SIZE; |
68 | if (tp->rx_buffers[entry].skb == NULL) { |
69 | struct sk_buff *skb; |
70 | dma_addr_t mapping; |
71 | |
72 | skb = tp->rx_buffers[entry].skb = |
73 | netdev_alloc_skb(dev, PKT_BUF_SZ); |
74 | if (skb == NULL) |
75 | break; |
76 | |
77 | mapping = dma_map_single(&tp->pdev->dev, skb->data, |
78 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
80 | dev_kfree_skb(skb); |
81 | tp->rx_buffers[entry].skb = NULL; |
82 | break; |
83 | } |
84 | |
85 | tp->rx_buffers[entry].mapping = mapping; |
86 | |
87 | tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); |
88 | refilled++; |
89 | } |
90 | tp->rx_ring[entry].status = cpu_to_le32(DescOwned); |
91 | } |
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
94 | /* Rx stopped due to out of buffers, |
95 | * restart it |
96 | */ |
97 | iowrite32(0x01, tp->base_addr + CSR2); |
98 | } |
99 | } |
100 | return refilled; |
101 | } |
102 | |
103 | #ifdef CONFIG_TULIP_NAPI |
104 | |
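/* OOM retry path: armed by tulip_poll() when the Rx ring could not be
 * refilled.  The handler just reschedules NAPI so the refill is retried
 * from the poll loop; Rx interrupts stay masked until a later poll
 * completes normally.
 */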
105 | void oom_timer(struct timer_list *t) |
106 | { |
107 | struct tulip_private *tp = from_timer(tp, t, oom_timer); |
108 | |
	napi_schedule(&tp->napi);
110 | } |
111 | |
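/* NAPI poll callback: consume up to @budget received packets, refill the
 * Rx ring, and re-enable Rx interrupts only once the ring has been fully
 * processed and successfully refilled.
 */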
112 | int tulip_poll(struct napi_struct *napi, int budget) |
113 | { |
114 | struct tulip_private *tp = container_of(napi, struct tulip_private, napi); |
115 | struct net_device *dev = tp->dev; |
116 | int entry = tp->cur_rx % RX_RING_SIZE; |
117 | int work_done = 0; |
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;

	/* One buffer is needed for mitigation activation; or this might be
	   a bug in the ring buffer code; check later -- JHS */
	if (budget >= RX_RING_SIZE)
		budget--;
#endif
129 | |
130 | if (tulip_debug > 4) |
131 | netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n" , |
132 | entry, tp->rx_ring[entry].status); |
133 | |
134 | do { |
135 | if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { |
136 | netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n" ); |
137 | break; |
138 | } |
139 | /* Acknowledge current RX interrupt sources. */ |
140 | iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); |
141 | |
142 | |
143 | /* If we own the next entry, it is a new packet. Send it up. */ |
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
145 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); |
146 | short pkt_len; |
147 | |
148 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) |
149 | break; |
150 | |
151 | if (tulip_debug > 5) |
152 | netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n" , |
153 | entry, status); |
154 | |
155 | if (++work_done >= budget) |
156 | goto not_done; |
157 | |
158 | /* |
159 | * Omit the four octet CRC from the length. |
160 | * (May not be considered valid until we have |
161 | * checked status for RxLengthOver2047 bits) |
162 | */ |
163 | pkt_len = ((status >> 16) & 0x7ff) - 4; |
164 | |
165 | /* |
166 | * Maximum pkt_len is 1518 (1514 + vlan header) |
167 | * Anything higher than this is always invalid |
168 | * regardless of RxLengthOver2047 bits |
169 | */ |
170 | |
171 | if ((status & (RxLengthOver2047 | |
172 | RxDescCRCError | |
173 | RxDescCollisionSeen | |
174 | RxDescRunt | |
175 | RxDescDescErr | |
176 | RxWholePkt)) != RxWholePkt || |
177 | pkt_len > 1518) { |
178 | if ((status & (RxLengthOver2047 | |
179 | RxWholePkt)) != RxWholePkt) { |
					/* Ignore earlier buffers. */
181 | if ((status & 0xffff) != 0x7fff) { |
182 | if (tulip_debug > 1) |
183 | dev_warn(&dev->dev, |
184 | "Oversized Ethernet frame spanned multiple buffers, status %08x!\n" , |
185 | status); |
186 | dev->stats.rx_length_errors++; |
187 | } |
188 | } else { |
189 | /* There was a fatal error. */ |
190 | if (tulip_debug > 2) |
191 | netdev_dbg(dev, "Receive error, Rx status %08x\n" , |
192 | status); |
193 | dev->stats.rx_errors++; /* end of a packet.*/ |
194 | if (pkt_len > 1518 || |
195 | (status & RxDescRunt)) |
196 | dev->stats.rx_length_errors++; |
197 | |
198 | if (status & 0x0004) |
199 | dev->stats.rx_frame_errors++; |
200 | if (status & 0x0002) |
201 | dev->stats.rx_crc_errors++; |
202 | if (status & 0x0001) |
203 | dev->stats.rx_fifo_errors++; |
204 | } |
205 | } else { |
206 | struct sk_buff *skb; |
207 | |
208 | /* Check if the packet is long enough to accept without copying |
209 | to a minimally-sized skbuff. */ |
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					dma_sync_single_for_cpu(&tp->pdev->dev,
								tp->rx_buffers[entry].mapping,
								pkt_len,
								DMA_FROM_DEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					skb_put_data(skb,
						     tp->rx_buffers[entry].skb->data,
						     pkt_len);
#endif
					dma_sync_single_for_device(&tp->pdev->dev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len,
								   DMA_FROM_DEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);
233 | |
234 | #ifndef final_version |
235 | if (tp->rx_buffers[entry].mapping != |
236 | le32_to_cpu(tp->rx_ring[entry].buffer1)) { |
237 | dev_err(&dev->dev, |
238 | "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n" , |
239 | le32_to_cpu(tp->rx_ring[entry].buffer1), |
240 | (unsigned long long)tp->rx_buffers[entry].mapping, |
241 | skb->head, temp); |
242 | } |
243 | #endif |
244 | |
245 | dma_unmap_single(&tp->pdev->dev, |
246 | tp->rx_buffers[entry].mapping, |
247 | PKT_BUF_SZ, |
248 | DMA_FROM_DEVICE); |
249 | |
250 | tp->rx_buffers[entry].skb = NULL; |
251 | tp->rx_buffers[entry].mapping = 0; |
252 | } |
253 | skb->protocol = eth_type_trans(skb, dev); |
254 | |
255 | netif_receive_skb(skb); |
256 | |
257 | dev->stats.rx_packets++; |
258 | dev->stats.rx_bytes += pkt_len; |
259 | } |
260 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION |
261 | received++; |
262 | #endif |
263 | |
264 | entry = (++tp->cur_rx) % RX_RING_SIZE; |
265 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) |
266 | tulip_refill_rx(dev); |
267 | |
268 | } |
269 | |
		/* New ack strategy... the irq handler does not ack Rx any
		   longer; hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it does not fail, we won
		 * finally: the amount of IO did not increase at all. */
280 | } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); |
281 | |
282 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION |
283 | |
	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: We only use the min and max (0, 15) settings from mit_table */
299 | |
300 | |
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}
315 | |
316 | #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ |
317 | |
318 | tulip_refill_rx(dev); |
319 | |
	/* If the RX ring was not fully refilled, we are out of memory. */
321 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
322 | goto oom; |
323 | |
324 | /* Remove us from polling list and enable RX intr. */ |
325 | |
	napi_complete_done(napi, work_done);
327 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); |
328 | |
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq was raised after the loop began, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs, both
	 * due to races in masking and due to too-late acking of already
	 * processed irqs. But it must not result in losing events.
	 */
339 | |
340 | return work_done; |
341 | |
342 | not_done: |
343 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || |
344 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
345 | tulip_refill_rx(dev); |
346 | |
347 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
348 | goto oom; |
349 | |
350 | return work_done; |
351 | |
352 | oom: /* Executed with RX ints disabled */ |
353 | |
354 | /* Start timer, stop polling, but do not enable rx interrupts. */ |
	mod_timer(&tp->oom_timer, jiffies + 1);
356 | |
	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but may have fired and completed
	 * before we did napi_complete(). See? We would lose it. */
360 | |
361 | /* remove ourselves from the polling list */ |
	napi_complete_done(napi, work_done);
363 | |
364 | return work_done; |
365 | } |
366 | |
367 | #else /* CONFIG_TULIP_NAPI */ |
368 | |
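/* Non-NAPI receive path, called from tulip_interrupt() with the Rx
 * interrupt already acknowledged.  Processes at most one ring's worth of
 * packets and returns the number received; the caller refills the ring.
 */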
369 | static int tulip_rx(struct net_device *dev) |
370 | { |
371 | struct tulip_private *tp = netdev_priv(dev); |
372 | int entry = tp->cur_rx % RX_RING_SIZE; |
373 | int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; |
374 | int received = 0; |
375 | |
376 | if (tulip_debug > 4) |
377 | netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n" , |
378 | entry, tp->rx_ring[entry].status); |
379 | /* If we own the next entry, it is a new packet. Send it up. */ |
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
381 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); |
382 | short pkt_len; |
383 | |
384 | if (tulip_debug > 5) |
385 | netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n" , |
386 | entry, status); |
387 | if (--rx_work_limit < 0) |
388 | break; |
389 | |
390 | /* |
391 | Omit the four octet CRC from the length. |
392 | (May not be considered valid until we have |
393 | checked status for RxLengthOver2047 bits) |
394 | */ |
395 | pkt_len = ((status >> 16) & 0x7ff) - 4; |
396 | /* |
397 | Maximum pkt_len is 1518 (1514 + vlan header) |
398 | Anything higher than this is always invalid |
399 | regardless of RxLengthOver2047 bits |
400 | */ |
401 | |
402 | if ((status & (RxLengthOver2047 | |
403 | RxDescCRCError | |
404 | RxDescCollisionSeen | |
405 | RxDescRunt | |
406 | RxDescDescErr | |
407 | RxWholePkt)) != RxWholePkt || |
408 | pkt_len > 1518) { |
409 | if ((status & (RxLengthOver2047 | |
410 | RxWholePkt)) != RxWholePkt) { |
				/* Ignore earlier buffers. */
412 | if ((status & 0xffff) != 0x7fff) { |
413 | if (tulip_debug > 1) |
414 | netdev_warn(dev, |
415 | "Oversized Ethernet frame spanned multiple buffers, status %08x!\n" , |
416 | status); |
417 | dev->stats.rx_length_errors++; |
418 | } |
419 | } else { |
420 | /* There was a fatal error. */ |
421 | if (tulip_debug > 2) |
422 | netdev_dbg(dev, "Receive error, Rx status %08x\n" , |
423 | status); |
424 | dev->stats.rx_errors++; /* end of a packet.*/ |
425 | if (pkt_len > 1518 || |
426 | (status & RxDescRunt)) |
427 | dev->stats.rx_length_errors++; |
428 | if (status & 0x0004) |
429 | dev->stats.rx_frame_errors++; |
430 | if (status & 0x0002) |
431 | dev->stats.rx_crc_errors++; |
432 | if (status & 0x0001) |
433 | dev->stats.rx_fifo_errors++; |
434 | } |
435 | } else { |
436 | struct sk_buff *skb; |
437 | |
438 | /* Check if the packet is long enough to accept without copying |
439 | to a minimally-sized skbuff. */ |
440 | if (pkt_len < tulip_rx_copybreak && |
441 | (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { |
442 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
443 | dma_sync_single_for_cpu(&tp->pdev->dev, |
444 | tp->rx_buffers[entry].mapping, |
445 | pkt_len, |
446 | DMA_FROM_DEVICE); |
447 | #if ! defined(__alpha__) |
448 | skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, |
449 | pkt_len); |
450 | skb_put(skb, pkt_len); |
451 | #else |
452 | skb_put_data(skb, |
453 | tp->rx_buffers[entry].skb->data, |
454 | pkt_len); |
455 | #endif |
456 | dma_sync_single_for_device(&tp->pdev->dev, |
457 | tp->rx_buffers[entry].mapping, |
458 | pkt_len, |
459 | DMA_FROM_DEVICE); |
460 | } else { /* Pass up the skb already on the Rx ring. */ |
461 | char *temp = skb_put(skb = tp->rx_buffers[entry].skb, |
462 | pkt_len); |
463 | |
464 | #ifndef final_version |
465 | if (tp->rx_buffers[entry].mapping != |
466 | le32_to_cpu(tp->rx_ring[entry].buffer1)) { |
				dev_err(&dev->dev,
					"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
					le32_to_cpu(tp->rx_ring[entry].buffer1),
					(unsigned long long)tp->rx_buffers[entry].mapping,
					skb->head, temp);
472 | } |
473 | #endif |
474 | |
475 | dma_unmap_single(&tp->pdev->dev, |
476 | tp->rx_buffers[entry].mapping, |
477 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
478 | |
479 | tp->rx_buffers[entry].skb = NULL; |
480 | tp->rx_buffers[entry].mapping = 0; |
481 | } |
482 | skb->protocol = eth_type_trans(skb, dev); |
483 | |
484 | netif_rx(skb); |
485 | |
486 | dev->stats.rx_packets++; |
487 | dev->stats.rx_bytes += pkt_len; |
488 | } |
489 | received++; |
490 | entry = (++tp->cur_rx) % RX_RING_SIZE; |
491 | } |
492 | return received; |
493 | } |
494 | #endif /* CONFIG_TULIP_NAPI */ |
495 | |
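/* On HPPA boards the PHY reports link events through CSR12: compare
 * against the shadow copy, ack the interrupt, and re-check the duplex
 * setting under the lock.  Returns 1 if a PHY event was handled.
 */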
496 | static inline unsigned int phy_interrupt (struct net_device *dev) |
497 | { |
498 | #ifdef __hppa__ |
499 | struct tulip_private *tp = netdev_priv(dev); |
500 | int csr12 = ioread32(tp->base_addr + CSR12) & 0xff; |
501 | |
502 | if (csr12 != tp->csr12_shadow) { |
503 | /* ack interrupt */ |
504 | iowrite32(csr12 | 0x02, tp->base_addr + CSR12); |
505 | tp->csr12_shadow = csr12; |
506 | /* do link change stuff */ |
507 | spin_lock(&tp->lock); |
508 | tulip_check_duplex(dev); |
509 | spin_unlock(&tp->lock); |
510 | /* clear irq ack bit */ |
511 | iowrite32(csr12 & ~0x02, tp->base_addr + CSR12); |
512 | |
513 | return 1; |
514 | } |
515 | #endif |
516 | |
517 | return 0; |
518 | } |
519 | |
520 | /* The interrupt handler does all of the Rx thread work and cleans up |
521 | after the Tx thread. */ |
522 | irqreturn_t tulip_interrupt(int irq, void *dev_instance) |
523 | { |
524 | struct net_device *dev = (struct net_device *)dev_instance; |
525 | struct tulip_private *tp = netdev_priv(dev); |
526 | void __iomem *ioaddr = tp->base_addr; |
527 | int csr5; |
528 | int missed; |
529 | int rx = 0; |
530 | int tx = 0; |
531 | int oi = 0; |
532 | int maxrx = RX_RING_SIZE; |
533 | int maxtx = TX_RING_SIZE; |
534 | int maxoi = TX_RING_SIZE; |
535 | #ifdef CONFIG_TULIP_NAPI |
536 | int rxd = 0; |
537 | #else |
538 | int entry; |
539 | #endif |
540 | unsigned int work_count = tulip_max_interrupt_work; |
541 | unsigned int handled = 0; |
542 | |
543 | /* Let's see whether the interrupt really is for us */ |
544 | csr5 = ioread32(ioaddr + CSR5); |
545 | |
546 | if (tp->flags & HAS_PHY_IRQ) |
		handled = phy_interrupt(dev);
548 | |
549 | if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) |
550 | return IRQ_RETVAL(handled); |
551 | |
552 | tp->nir++; |
553 | |
554 | do { |
555 | |
556 | #ifdef CONFIG_TULIP_NAPI |
557 | |
558 | if (!rxd && (csr5 & (RxIntr | RxNoBuf))) { |
559 | rxd++; |
560 | /* Mask RX intrs and add the device to poll list. */ |
561 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); |
			napi_schedule(&tp->napi);
563 | |
564 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) |
565 | break; |
566 | } |
567 | |
		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking */
570 | |
571 | iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); |
572 | |
573 | #else |
574 | /* Acknowledge all of the current interrupt sources ASAP. */ |
575 | iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); |
576 | |
577 | |
578 | if (csr5 & (RxIntr | RxNoBuf)) { |
579 | rx += tulip_rx(dev); |
580 | tulip_refill_rx(dev); |
581 | } |
582 | |
583 | #endif /* CONFIG_TULIP_NAPI */ |
584 | |
585 | if (tulip_debug > 4) |
586 | netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n" , |
587 | csr5, ioread32(ioaddr + CSR5)); |
588 | |
589 | |
590 | if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { |
591 | unsigned int dirty_tx; |
592 | |
			spin_lock(&tp->lock);
594 | |
595 | for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; |
596 | dirty_tx++) { |
597 | int entry = dirty_tx % TX_RING_SIZE; |
598 | int status = le32_to_cpu(tp->tx_ring[entry].status); |
599 | |
600 | if (status < 0) |
601 | break; /* It still has not been Txed */ |
602 | |
603 | /* Check for Rx filter setup frames. */ |
604 | if (tp->tx_buffers[entry].skb == NULL) { |
605 | /* test because dummy frames not mapped */ |
606 | if (tp->tx_buffers[entry].mapping) |
607 | dma_unmap_single(&tp->pdev->dev, |
608 | tp->tx_buffers[entry].mapping, |
609 | sizeof(tp->setup_frame), |
610 | DMA_TO_DEVICE); |
611 | continue; |
612 | } |
613 | |
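				/* Decode of the TDES0 error bits below (as
				 * documented for the 21143): 0x4104 = jabber
				 * timeout/excessive collisions/link fail,
				 * 0x0C00 = loss of or no carrier, 0x0200 =
				 * late collision, 0x0002 = FIFO underflow,
				 * 0x0080 = heartbeat fail.
				 */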
				if (status & 0x8000) {
					/* There was a major error; log it. */
616 | #ifndef final_version |
617 | if (tulip_debug > 1) |
618 | netdev_dbg(dev, "Transmit error, Tx status %08x\n" , |
619 | status); |
620 | #endif |
621 | dev->stats.tx_errors++; |
622 | if (status & 0x4104) |
623 | dev->stats.tx_aborted_errors++; |
624 | if (status & 0x0C00) |
625 | dev->stats.tx_carrier_errors++; |
626 | if (status & 0x0200) |
627 | dev->stats.tx_window_errors++; |
628 | if (status & 0x0002) |
629 | dev->stats.tx_fifo_errors++; |
630 | if ((status & 0x0080) && tp->full_duplex == 0) |
631 | dev->stats.tx_heartbeat_errors++; |
632 | } else { |
633 | dev->stats.tx_bytes += |
634 | tp->tx_buffers[entry].skb->len; |
635 | dev->stats.collisions += (status >> 3) & 15; |
636 | dev->stats.tx_packets++; |
637 | } |
638 | |
639 | dma_unmap_single(&tp->pdev->dev, |
640 | tp->tx_buffers[entry].mapping, |
641 | tp->tx_buffers[entry].skb->len, |
642 | DMA_TO_DEVICE); |
643 | |
644 | /* Free the original skb. */ |
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
646 | tp->tx_buffers[entry].skb = NULL; |
647 | tp->tx_buffers[entry].mapping = 0; |
648 | tx++; |
649 | } |
650 | |
651 | #ifndef final_version |
652 | if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { |
653 | dev_err(&dev->dev, |
654 | "Out-of-sync dirty pointer, %d vs. %d\n" , |
655 | dirty_tx, tp->cur_tx); |
656 | dirty_tx += TX_RING_SIZE; |
657 | } |
658 | #endif |
659 | |
660 | if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) |
661 | netif_wake_queue(dev); |
662 | |
663 | tp->dirty_tx = dirty_tx; |
664 | if (csr5 & TxDied) { |
665 | if (tulip_debug > 2) |
666 | dev_warn(&dev->dev, |
667 | "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n" , |
668 | csr5, ioread32(ioaddr + CSR6), |
669 | tp->csr6); |
670 | tulip_restart_rxtx(tp); |
671 | } |
			spin_unlock(&tp->lock);
673 | } |
674 | |
675 | /* Log errors. */ |
676 | if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ |
677 | if (csr5 == 0xffffffff) |
678 | break; |
679 | if (csr5 & TxJabber) |
680 | dev->stats.tx_errors++; |
681 | if (csr5 & TxFIFOUnderflow) { |
682 | if ((tp->csr6 & 0xC000) != 0xC000) |
683 | tp->csr6 += 0x4000; /* Bump up the Tx threshold */ |
684 | else |
685 | tp->csr6 |= 0x00200000; /* Store-n-forward. */ |
686 | /* Restart the transmit process. */ |
687 | tulip_restart_rxtx(tp); |
688 | iowrite32(0, ioaddr + CSR1); |
689 | } |
690 | if (csr5 & (RxDied | RxNoBuf)) { |
691 | if (tp->flags & COMET_MAC_ADDR) { |
692 | iowrite32(tp->mc_filter[0], ioaddr + 0xAC); |
693 | iowrite32(tp->mc_filter[1], ioaddr + 0xB0); |
694 | } |
695 | } |
696 | if (csr5 & RxDied) { /* Missed a Rx frame. */ |
697 | dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; |
698 | dev->stats.rx_errors++; |
699 | tulip_start_rxtx(tp); |
700 | } |
701 | /* |
702 | * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this |
703 | * call is ever done under the spinlock |
704 | */ |
705 | if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { |
706 | if (tp->link_change) |
707 | (tp->link_change)(dev, csr5); |
708 | } |
709 | if (csr5 & SystemError) { |
710 | int error = (csr5 >> 23) & 7; |
711 | /* oops, we hit a PCI error. The code produced corresponds |
712 | * to the reason: |
713 | * 0 - parity error |
714 | * 1 - master abort |
715 | * 2 - target abort |
716 | * Note that on parity error, we should do a software reset |
717 | * of the chip to get it back into a sane state (according |
718 | * to the 21142/3 docs that is). |
719 | * -- rmk |
720 | */ |
721 | dev_err(&dev->dev, |
722 | "(%lu) System Error occurred (%d)\n" , |
723 | tp->nir, error); |
724 | } |
			/* Clear all error sources, including undocumented ones! */
726 | iowrite32(0x0800f7ba, ioaddr + CSR5); |
727 | oi++; |
728 | } |
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
735 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); |
736 | tp->ttimer = 0; |
737 | oi++; |
738 | } |
739 | if (tx > maxtx || rx > maxrx || oi > maxoi) { |
740 | if (tulip_debug > 1) |
741 | dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n" , |
742 | csr5, tp->nir, tx, rx, oi); |
743 | |
744 | /* Acknowledge all interrupt sources. */ |
745 | iowrite32(0x8001ffff, ioaddr + CSR5); |
746 | if (tp->flags & HAS_INTR_MITIGATION) { |
747 | /* Josip Loncaric at ICASE did extensive experimentation |
748 | to develop a good interrupt mitigation setting.*/ |
749 | iowrite32(0x8b240000, ioaddr + CSR11); |
750 | } else if (tp->chip_id == LC82C168) { |
751 | /* the LC82C168 doesn't have a hw timer.*/ |
752 | iowrite32(0x00, ioaddr + CSR7); |
				mod_timer(&tp->timer, RUN_AT(HZ/50));
754 | } else { |
755 | /* Mask all interrupting sources, set timer to |
756 | re-enable. */ |
757 | iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7); |
758 | iowrite32(0x0012, ioaddr + CSR11); |
759 | } |
760 | break; |
761 | } |
762 | |
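		/* Bound the total work done in one hardware interrupt; see
		   tulip_max_interrupt_work. */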
763 | work_count--; |
764 | if (work_count == 0) |
765 | break; |
766 | |
767 | csr5 = ioread32(ioaddr + CSR5); |
768 | |
769 | #ifdef CONFIG_TULIP_NAPI |
770 | if (rxd) |
771 | csr5 &= ~RxPollInt; |
772 | } while ((csr5 & (TxNoBuf | |
773 | TxDied | |
774 | TxIntr | |
775 | TimerInt | |
776 | /* Abnormal intr. */ |
777 | RxDied | |
778 | TxFIFOUnderflow | |
779 | TxJabber | |
780 | TPLnkFail | |
781 | SystemError )) != 0); |
782 | #else |
783 | } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); |
784 | |
785 | tulip_refill_rx(dev); |
786 | |
787 | /* check if the card is in suspend mode */ |
788 | entry = tp->dirty_rx % RX_RING_SIZE; |
789 | if (tp->rx_buffers[entry].skb == NULL) { |
790 | if (tulip_debug > 1) |
791 | dev_warn(&dev->dev, |
792 | "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n" , |
793 | tp->nir, tp->cur_rx, tp->ttimer, rx); |
794 | if (tp->chip_id == LC82C168) { |
795 | iowrite32(0x00, ioaddr + CSR7); |
796 | mod_timer(&tp->timer, RUN_AT(HZ/50)); |
797 | } else { |
798 | if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { |
799 | if (tulip_debug > 1) |
800 | dev_warn(&dev->dev, |
801 | "in rx suspend mode: (%lu) set timer\n" , |
802 | tp->nir); |
803 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, |
804 | ioaddr + CSR7); |
805 | iowrite32(TimerInt, ioaddr + CSR5); |
806 | iowrite32(12, ioaddr + CSR11); |
807 | tp->ttimer = 1; |
808 | } |
809 | } |
810 | } |
811 | #endif /* CONFIG_TULIP_NAPI */ |
812 | |
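	/* CSR8 holds the missed-frame counter; bit 16 is believed to be its
	   overflow flag, so saturate the count at 0x10000 when it is set. */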
813 | if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) { |
814 | dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed; |
815 | } |
816 | |
817 | if (tulip_debug > 4) |
818 | netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n" , |
819 | ioread32(ioaddr + CSR5)); |
820 | |
821 | return IRQ_HANDLED; |
822 | } |
823 | |