// SPDX-License-Identifier: GPL-2.0-only
/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16

#define PKT_BUF_SZ 1584

#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

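/* The NEXT_*()/PREV_*() masks above only work because the ring sizes
 * are powers of two. A worked example of TX_BUFFS_AVAIL() with the
 * default SEEQ_TX_BUFFERS of 16: tx_old == 3 and tx_new == 7 means
 * four descriptors are in flight, leaving 3 + 15 - 7 == 11 free; after
 * tx_new wraps to 2 with tx_old still 3, 3 - 2 - 1 == 0 slots remain
 * and the queue is stopped. One slot always stays unused so a full
 * ring can be told apart from an empty one (both would otherwise have
 * tx_old == tx_new).
 */

/* Translate a CPU pointer into one of the descriptor rings to the
 * device-visible DMA address, as an offset from the base of the
 * srings block.
 */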
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                \
			    (dma_addr_t)((unsigned long)(v) - \
					 (unsigned long)((sp)->rx_desc)))

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
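
/* Illustrative only, not something the driver currently does: since
 * <linux/module.h> is already included, rx_copybreak could be exposed
 * as a tunable, e.g.
 *
 *	module_param(rx_copybreak, int, 0444);
 *	MODULE_PARM_DESC(rx_copybreak, "copy-only-tiny-frames threshold");
 */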

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 * descriptors must be 8-byte aligned. So don't touch this without
 * some care.
 */
struct sgiseeq_init_block {	/* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};
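
/* A minimal compile-time guard for the layout warning above, added here
 * as a sketch (not part of the original driver). It assumes
 * static_assert() from <linux/build_bug.h>, which <linux/kernel.h>
 * drags in. PAD_SIZE rounds each descriptor up to 128 bytes, so a
 * multiple-of-8 size is what keeps every descriptor in the arrays
 * 8-byte aligned.
 */
static_assert(sizeof(struct sgiseeq_rx_desc) % 8 == 0,
	      "HPC DMA descriptors must be 8-byte aligned");
static_assert(sizeof(struct sgiseeq_tx_desc) % 8 == 0,
	      "HPC DMA descriptors must be 8-byte aligned");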

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};

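/* Both descriptor flavours are the same size (128 bytes, thanks to
 * PAD_SIZE above), which is why the two helpers below can use
 * sizeof(struct sgiseeq_rx_desc) for rx and tx descriptors alike.
 */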
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	eth_hw_addr_set(dev, sa->sa_data);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

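/* How to read the init values below, going by the flag names (see
 * hpc3.h for the authoritative definitions): a fresh tx slot is
 * "end of chain, transmit done" (EOX | ETXD), i.e. empty and owned by
 * the CPU, while a fresh rx slot is handed straight to the HPC
 * (HPCDMA_OWN) with its interrupt bit set and the buffer size in the
 * byte-count field.
 */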
#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* clear tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_dev(dev, rd);

	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC isn't doing anything and there are more packets
	 * with ETXD cleared and XIU set, we must make very certain
	 * that we restart the HPC or we risk locking up the
	 * adapter. The following code is only safe iff the HPC DMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		dma_sync_desc_dev(dev, td);
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			dma_sync_desc_dev(dev, td);
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);

	return 0;
}

static netdev_tx_t
sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry. There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state. This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev, unsigned int txqueue)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver? At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_rx_mode	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct sgiseeq_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				   &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;
	spin_lock_init(&sp->tx_lock);

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	eth_hw_addr_set(dev, pd->mac);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops = &sgiseeq_netdev_ops;
	dev->watchdog_timeo = (200 * HZ) / 1000;
	dev->irq = irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_attrs;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_attrs:
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static void sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma, DMA_BIDIRECTIONAL);
	free_netdev(dev);
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove_new = sgiseeq_remove,
	.driver = {
		.name	= "sgiseeq",
	}
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");