/*
 * Linux ARCnet driver - device-independent routines
 *
 * Written 1997 by David Woodhouse.
 * Written 1994-1999 by Avery Pennarun.
 * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
 * Derived from skeleton.c by Donald Becker.
 *
 * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
 *  for sponsoring the further development of this driver.
 *
 * **********************
 *
 * The original copyright was as follows:
 *
 * skeleton.c Written 1993 by Donald Becker.
 * Copyright 1993 United States Government as represented by the
 * Director, National Security Agency.  This software may only be used
 * and distributed according to the terms of the GNU General Public License as
 * modified by SRC, incorporated herein by reference.
 *
 * **********************
 *
 * The change log is now in a file called ChangeLog in this directory.
 *
 * Sources:
 *  - Crynwr arcnet.com/arcether.com packet drivers.
 *  - arcnet.c v0.00 dated 1/1/94 and apparently by
 *     Donald Becker - it didn't work :)
 *  - skeleton.c v0.05 dated 11/16/93 by Donald Becker
 *     (from Linux Kernel 1.1.45)
 *  - RFC's 1201 and 1051 - re: TCP/IP over ARCnet
 *  - The official ARCnet COM9026 data sheets (!) thanks to
 *     Ken Cornetet <kcornete@nyx10.cs.du.edu>
 *  - The official ARCnet COM20020 data sheets.
 *  - Information on some more obscure ARCnet controller chips, thanks
 *     to the nice people at SMSC.
 *  - net/inet/eth.c (from kernel 1.1.50) for header-building info.
 *  - Alternate Linux ARCnet source by V.Shergin <vsher@sao.stavropol.su>
 *  - Textual information and more alternate source from Joachim Koenig
 *     <jojo@repas.de>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/errqueue.h>

#include <linux/leds.h>
#include <linux/workqueue.h>

#include "arcdevice.h"
#include "com9026.h"

/* "do nothing" functions for protocol drivers */
static void null_rx(struct net_device *dev, int bufnum,
		    struct archdr *pkthdr, int length);
static int null_build_header(struct sk_buff *skb, struct net_device *dev,
			     unsigned short type, uint8_t daddr);
static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
			   int length, int bufnum);

static void arcnet_rx(struct net_device *dev, int bufnum);

/* one ArcProto per possible proto ID.  None of the elements of
 * arc_proto_map are allowed to be NULL; they will get set to
 * arc_proto_default instead.  It also must not be NULL; if you would like
 * to set it to NULL, set it to &arc_proto_null instead.
 */
struct ArcProto *arc_proto_map[256];
EXPORT_SYMBOL(arc_proto_map);

struct ArcProto *arc_proto_default;
EXPORT_SYMBOL(arc_proto_default);

struct ArcProto *arc_bcast_proto;
EXPORT_SYMBOL(arc_bcast_proto);

struct ArcProto *arc_raw_proto;
EXPORT_SYMBOL(arc_raw_proto);

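/* A protocol module typically claims its encapsulation IDs along these
 * lines (a sketch only -- the exact registration logic lives in the
 * individual protocol modules):
 *
 *	arc_proto_map[my_proto_id] = &my_proto;
 *	if (arc_bcast_proto == arc_proto_default)
 *		arc_bcast_proto = &my_proto;
 */
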
static struct ArcProto arc_proto_null = {
	.suffix = '?',
	.mtu = XMTU,
	.is_ip = 0,
	.rx = null_rx,
	.build_header = null_build_header,
	.prepare_tx = null_prepare_tx,
	.continue_tx = NULL,
	.ack_tx = NULL
};

/* Exported function prototypes */
int arcnet_debug = ARCNET_DEBUG;
EXPORT_SYMBOL(arcnet_debug);

/* Internal function prototypes */
static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned len);
static int go_tx(struct net_device *dev);

static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
MODULE_DESCRIPTION("ARCnet core driver");
MODULE_LICENSE("GPL");

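/* Module init: point every slot of the protocol map at the do-nothing
 * handler; protocol modules overwrite individual entries (and possibly
 * the default/broadcast/raw pointers) when they register.
 */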
static int __init arcnet_init(void)
{
	int count;

	arcnet_debug = debug;

	pr_info("arcnet loaded\n");

	/* initialize the protocol map */
	arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null;
	for (count = 0; count < 256; count++)
		arc_proto_map[count] = arc_proto_default;

	if (BUGLVL(D_DURING))
		pr_info("struct sizes: %zd %zd %zd %zd %zd\n",
			sizeof(struct arc_hardware),
			sizeof(struct arc_rfc1201),
			sizeof(struct arc_rfc1051),
			sizeof(struct arc_eth_encap),
			sizeof(struct archdr));

	return 0;
}

static void __exit arcnet_exit(void)
{
}

module_init(arcnet_init);
module_exit(arcnet_exit);

/* Dump the contents of an sk_buff */
#if ARCNET_DEBUG_MAX & D_SKB
void arcnet_dump_skb(struct net_device *dev,
		     struct sk_buff *skb, char *desc)
{
	char hdr[32];

	/* dump the packet */
	snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc);
	print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
		       16, 1, skb->data, skb->len, true);
}
EXPORT_SYMBOL(arcnet_dump_skb);
#endif

/* Dump the contents of an ARCnet buffer */
#if (ARCNET_DEBUG_MAX & (D_RX | D_TX))
static void arcnet_dump_packet(struct net_device *dev, int bufnum,
			       char *desc, int take_arcnet_lock)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int i, length;
	unsigned long flags = 0;
	static uint8_t buf[512];
	char hdr[32];

	/* hw.copy_from_card expects IRQ context so take the IRQ lock
	 * to keep it single threaded
	 */
	if (take_arcnet_lock)
		spin_lock_irqsave(&lp->lock, flags);

	lp->hw.copy_from_card(dev, bufnum, 0, buf, 512);
	if (take_arcnet_lock)
		spin_unlock_irqrestore(&lp->lock, flags);

	/* if the offset[0] byte is nonzero, this is a 256-byte packet */
	length = (buf[2] ? 256 : 512);

	/* dump the packet */
	snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc);
	print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
		       16, 1, buf, length, true);
}

#else

#define arcnet_dump_packet(dev, bufnum, desc, take_arcnet_lock) do { } while (0)

#endif

/* Trigger a LED event in response to an ARCNET device event */
void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
{
	struct arcnet_local *lp = netdev_priv(dev);

	switch (event) {
	case ARCNET_LED_EVENT_RECON:
		led_trigger_blink_oneshot(lp->recon_led_trig, 350, 350, 0);
		break;
	case ARCNET_LED_EVENT_OPEN:
		led_trigger_event(lp->tx_led_trig, LED_OFF);
		led_trigger_event(lp->recon_led_trig, LED_OFF);
		break;
	case ARCNET_LED_EVENT_STOP:
		led_trigger_event(lp->tx_led_trig, LED_OFF);
		led_trigger_event(lp->recon_led_trig, LED_OFF);
		break;
	case ARCNET_LED_EVENT_TX:
		led_trigger_blink_oneshot(lp->tx_led_trig, 50, 50, 0);
		break;
	}
}
EXPORT_SYMBOL_GPL(arcnet_led_event);

static void arcnet_led_release(struct device *gendev, void *res)
{
	struct arcnet_local *lp = netdev_priv(to_net_dev(gendev));

	led_trigger_unregister_simple(lp->tx_led_trig);
	led_trigger_unregister_simple(lp->recon_led_trig);
}

/* Register ARCNET LED triggers for an arcnet device
 *
 * This is normally called from a driver's probe function
 */
void devm_arcnet_led_init(struct net_device *netdev, int index, int subid)
{
	struct arcnet_local *lp = netdev_priv(netdev);
	void *res;

	res = devres_alloc(arcnet_led_release, 0, GFP_KERNEL);
	if (!res) {
		netdev_err(netdev, "cannot register LED triggers\n");
		return;
	}

	snprintf(lp->tx_led_trig_name, sizeof(lp->tx_led_trig_name),
		 "arc%d-%d-tx", index, subid);
	snprintf(lp->recon_led_trig_name, sizeof(lp->recon_led_trig_name),
		 "arc%d-%d-recon", index, subid);

	led_trigger_register_simple(lp->tx_led_trig_name,
				    &lp->tx_led_trig);
	led_trigger_register_simple(lp->recon_led_trig_name,
				    &lp->recon_led_trig);

	devres_add(&netdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_arcnet_led_init);

/* Unregister a protocol driver from the arc_proto_map.  Protocol drivers
 * are responsible for registering themselves, but the unregister routine
 * is pretty generic so we'll do it here.
 */
void arcnet_unregister_proto(struct ArcProto *proto)
{
	int count;

	if (arc_proto_default == proto)
		arc_proto_default = &arc_proto_null;
	if (arc_bcast_proto == proto)
		arc_bcast_proto = arc_proto_default;
	if (arc_raw_proto == proto)
		arc_raw_proto = arc_proto_default;

	for (count = 0; count < 256; count++) {
		if (arc_proto_map[count] == proto)
			arc_proto_map[count] = arc_proto_default;
	}
}
EXPORT_SYMBOL(arcnet_unregister_proto);

/* Add a buffer to the queue.  Only the interrupt handler is allowed to do
 * this, unless interrupts are disabled.
 *
 * Note: we don't check for a full queue, since there aren't enough buffers
 * to more than fill it.
 */
static void release_arcbuf(struct net_device *dev, int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int i;

	lp->buf_queue[lp->first_free_buf++] = bufnum;
	lp->first_free_buf %= 5;

	if (BUGLVL(D_DURING)) {
		arc_printk(D_DURING, dev, "release_arcbuf: freed #%d; buffer queue is now: ",
			   bufnum);
		for (i = lp->next_buf; i != lp->first_free_buf; i = (i + 1) % 5)
			arc_cont(D_DURING, "#%d ", lp->buf_queue[i]);
		arc_cont(D_DURING, "\n");
	}
}

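/* lp->buf_queue is a five-slot ring indexed by first_free_buf (producer,
 * release_arcbuf) and next_buf (consumer, get_arcbuf); with only four
 * hardware buffers in circulation the ring can never overflow.
 * lp->buf_lock is an atomic re-entrance guard rather than a real lock:
 * if get_arcbuf() is entered twice it just logs an "overlap" and
 * returns -1.
 */
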
/* Get a buffer from the queue.
 * If this returns -1, there are no buffers available.
 */
static int get_arcbuf(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int buf = -1, i;

	if (!atomic_dec_and_test(&lp->buf_lock)) {
		/* already in this function */
		arc_printk(D_NORMAL, dev, "get_arcbuf: overlap (%d)!\n",
			   lp->buf_lock.counter);
	} else {	/* we can continue */
		if (lp->next_buf >= 5)
			lp->next_buf -= 5;

		if (lp->next_buf == lp->first_free_buf) {
			arc_printk(D_NORMAL, dev, "get_arcbuf: BUG: no buffers are available??\n");
		} else {
			buf = lp->buf_queue[lp->next_buf++];
			lp->next_buf %= 5;
		}
	}

	if (BUGLVL(D_DURING)) {
		arc_printk(D_DURING, dev, "get_arcbuf: got #%d; buffer queue is now: ",
			   buf);
		for (i = lp->next_buf; i != lp->first_free_buf; i = (i + 1) % 5)
			arc_cont(D_DURING, "#%d ", lp->buf_queue[i]);
		arc_cont(D_DURING, "\n");
	}

	atomic_inc(&lp->buf_lock);
	return buf;
}

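/* The device MTU is the smallest MTU advertised by any registered
 * encapsulation; if no protocol module has registered yet, fall back to
 * the raw ARCnet maximum, XMTU.
 */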
static int choose_mtu(void)
{
	int count, mtu = 65535;

	/* choose the smallest MTU of all available encaps */
	for (count = 0; count < 256; count++) {
		if (arc_proto_map[count] != &arc_proto_null &&
		    arc_proto_map[count]->mtu < mtu) {
			mtu = arc_proto_map[count]->mtu;
		}
	}

	return mtu == 65535 ? XMTU : mtu;
}

static const struct header_ops arcnet_header_ops = {
	.create = arcnet_header,
};

static const struct net_device_ops arcnet_netdev_ops = {
	.ndo_open	= arcnet_open,
	.ndo_stop	= arcnet_close,
	.ndo_start_xmit = arcnet_send_packet,
	.ndo_tx_timeout = arcnet_timeout,
};

/* Setup a struct device for ARCnet. */
static void arcdev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_ARCNET;
	dev->netdev_ops = &arcnet_netdev_ops;
	dev->header_ops = &arcnet_header_ops;
	dev->hard_header_len = sizeof(struct arc_hardware);
	dev->mtu = choose_mtu();

	dev->addr_len = ARCNET_ALEN;
	dev->tx_queue_len = 100;
	dev->broadcast[0] = 0x00;	/* for us, broadcasts are address 0 */
	dev->watchdog_timeo = TX_TIMEOUT;

	/* New-style flags. */
	dev->flags = IFF_BROADCAST;
}

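/* Link-check timer: armed one second after open (and after every RECON);
 * if no reset is in progress when it fires, it declares carrier up again.
 */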
static void arcnet_timer(struct timer_list *t)
{
	struct arcnet_local *lp = timer_container_of(lp, t, timer);
	struct net_device *dev = lp->dev;

	spin_lock_irq(&lp->lock);

	if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
		netif_carrier_on(dev);
		netdev_info(dev, "link up\n");
	}

	spin_unlock_irq(&lp->lock);
}

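/* Recover from a spurious RESET reported by the interrupt handler: once
 * scheduled, this worker cycles the interface (dev_close()/dev_open())
 * under RTNL, unless an ifdown has happened in the meantime.
 */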
static void reset_device_work(struct work_struct *work)
{
	struct arcnet_local *lp;
	struct net_device *dev;

	lp = container_of(work, struct arcnet_local, reset_work);
	dev = lp->dev;

	/* Do not bring the network interface back up if an ifdown
	 * was already done.
	 */
	if (!netif_running(dev) || !lp->reset_in_progress)
		return;

	rtnl_lock();

	/* Do another check, in case of an ifdown that was triggered in
	 * the small race window between the exit condition above and
	 * acquiring RTNL.
	 */
	if (!netif_running(dev) || !lp->reset_in_progress)
		goto out;

	dev_close(dev);
	dev_open(dev, NULL);

out:
	rtnl_unlock();
}

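/* Report the final TX status back to the sender's socket error queue:
 * clone the outgoing skb, fill in a sock_exterr_skb with origin
 * SO_EE_ORIGIN_TXSTATUS and the ack status recorded by the interrupt
 * handler, then queue it on the socket with sock_queue_err_skb().
 */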
static void arcnet_reply_work(struct work_struct *t)
{
	struct arcnet_local *lp = from_work(lp, t, reply_work);

	struct sk_buff *ackskb, *skb;
	struct sock_exterr_skb *serr;
	struct sock *sk;
	int ret;

	local_irq_disable();
	skb = lp->outgoing.skb;
	if (!skb || !skb->sk) {
		local_irq_enable();
		return;
	}

	sock_hold(skb->sk);
	sk = skb->sk;
	ackskb = skb_clone_sk(skb);
	sock_put(skb->sk);

	if (!ackskb) {
		local_irq_enable();
		return;
	}

	serr = SKB_EXT_ERR(ackskb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
	serr->ee.ee_data = skb_shinfo(skb)->tskey;
	serr->ee.ee_info = lp->reply_status;

	/* finally erasing outgoing skb */
	dev_kfree_skb(lp->outgoing.skb);
	lp->outgoing.skb = NULL;

	ackskb->dev = lp->dev;

	ret = sock_queue_err_skb(sk, ackskb);
	if (ret)
		dev_kfree_skb_irq(ackskb);

	local_irq_enable();
};

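/* Allocate a net_device for an ARCnet card.  Only the generic pieces are
 * set up here (lock, link timer, reset worker); the chipset driver fills
 * in lp->hw and registers the device itself.
 */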
struct net_device *alloc_arcdev(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct arcnet_local),
			   name && *name ? name : "arc%d", NET_NAME_UNKNOWN,
			   arcdev_setup);
	if (dev) {
		struct arcnet_local *lp = netdev_priv(dev);

		lp->dev = dev;
		spin_lock_init(&lp->lock);
		timer_setup(&lp->timer, arcnet_timer, 0);
		INIT_WORK(&lp->reset_work, reset_device_work);
	}

	return dev;
}
EXPORT_SYMBOL(alloc_arcdev);

void free_arcdev(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);

	/* Do not cancel this at ->ndo_close(), as the workqueue itself
	 * indirectly calls the ifdown path through dev_close().
	 */
	cancel_work_sync(&lp->reset_work);
	free_netdev(dev);
}
EXPORT_SYMBOL(free_arcdev);

/* Open/initialize the board.  This is called sometime after booting when
 * the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even registers
 * that "should" only need to be set once at boot, so that there is a
 * non-reboot way to recover if something goes wrong.
 */
int arcnet_open(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int count, newmtu, error;

	arc_printk(D_INIT, dev, "opened.");

	if (!try_module_get(lp->hw.owner))
		return -ENODEV;

	if (BUGLVL(D_PROTO)) {
		arc_printk(D_PROTO, dev, "protocol map (default is '%c'): ",
			   arc_proto_default->suffix);
		for (count = 0; count < 256; count++)
			arc_cont(D_PROTO, "%c", arc_proto_map[count]->suffix);
		arc_cont(D_PROTO, "\n");
	}

	INIT_WORK(&lp->reply_work, arcnet_reply_work);

	arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");

	/* try to put the card in a defined state - if it fails the first
	 * time, actually reset it.
	 */
	error = -ENODEV;
	if (lp->hw.reset(dev, 0) && lp->hw.reset(dev, 1))
		goto out_module_put;

	newmtu = choose_mtu();
	if (newmtu < dev->mtu)
		dev->mtu = newmtu;

	arc_printk(D_INIT, dev, "arcnet_open: mtu: %d.\n", dev->mtu);

	/* autodetect the encapsulation for each host. */
	memset(lp->default_proto, 0, sizeof(lp->default_proto));

	/* the broadcast address is special - use the 'bcast' protocol */
	for (count = 0; count < 256; count++) {
		if (arc_proto_map[count] == arc_bcast_proto) {
			lp->default_proto[0] = count;
			break;
		}
	}

	/* initialize buffers */
	atomic_set(&lp->buf_lock, 1);

	lp->next_buf = lp->first_free_buf = 0;
	release_arcbuf(dev, 0);
	release_arcbuf(dev, 1);
	release_arcbuf(dev, 2);
	release_arcbuf(dev, 3);
	lp->cur_tx = lp->next_tx = -1;
	lp->cur_rx = -1;

	lp->rfc1201.sequence = 1;

	/* bring up the hardware driver */
	if (lp->hw.open)
		lp->hw.open(dev);

	if (dev->dev_addr[0] == 0)
		arc_printk(D_NORMAL, dev, "WARNING! Station address 00 is reserved for broadcasts!\n");
	else if (dev->dev_addr[0] == 255)
		arc_printk(D_NORMAL, dev, "WARNING! Station address FF may confuse DOS networking programs!\n");

	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
	if (lp->hw.status(dev) & RESETflag) {
		arc_printk(D_DEBUG, dev, "%s: %d: %s\n",
			   __FILE__, __LINE__, __func__);
		lp->hw.command(dev, CFLAGScmd | RESETclear);
	}

	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
	/* make sure we're ready to receive IRQ's. */
	lp->hw.intmask(dev, 0);
	udelay(1);	/* give it time to set the mask before
			 * we reset it again. (may not even be
			 * necessary)
			 */
	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
	lp->intmask = NORXflag | RECONflag;
	lp->hw.intmask(dev, lp->intmask);
	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);

	netif_carrier_off(dev);
	netif_start_queue(dev);
	mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));

	arcnet_led_event(dev, ARCNET_LED_EVENT_OPEN);
	return 0;

out_module_put:
	module_put(lp->hw.owner);
	return error;
}
EXPORT_SYMBOL(arcnet_open);

/* The inverse routine to arcnet_open - shuts down the card. */
int arcnet_close(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);

	arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
	timer_delete_sync(&lp->timer);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cancel_work_sync(&lp->reply_work);

	/* flush TX and disable RX */
	lp->hw.intmask(dev, 0);
	lp->hw.command(dev, NOTXcmd);	/* stop transmit */
	lp->hw.command(dev, NORXcmd);	/* disable receive */
	mdelay(1);

	/* shut down the card */
	lp->hw.close(dev);

	/* reset counters */
	lp->reset_in_progress = 0;

	module_put(lp->hw.owner);
	return 0;
}
EXPORT_SYMBOL(arcnet_close);

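/* Build the hardware header.  Three cases:
 *  - ETH_P_ARCNET: raw mode, use arc_raw_proto directly;
 *  - no destination address yet: push only the packet type and defer the
 *    real header (returns -2);
 *  - otherwise: pick the encapsulation last seen from that host (or the
 *    broadcast protocol as a fallback) and let it build the header.
 */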
static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned len)
{
	const struct arcnet_local *lp = netdev_priv(dev);
	uint8_t _daddr, proto_num;
	struct ArcProto *proto;

	arc_printk(D_DURING, dev,
		   "create header from %d to %d; protocol %d (%Xh); size %u.\n",
		   saddr ? *(uint8_t *)saddr : -1,
		   daddr ? *(uint8_t *)daddr : -1,
		   type, type, len);

	if (skb->len != 0 && len != skb->len)
		arc_printk(D_NORMAL, dev, "arcnet_header: Yikes! skb->len(%d) != len(%d)!\n",
			   skb->len, len);

	/* Type is host order - ? */
	if (type == ETH_P_ARCNET) {
		proto = arc_raw_proto;
		arc_printk(D_DEBUG, dev, "arc_raw_proto used. proto='%c'\n",
			   proto->suffix);
		_daddr = daddr ? *(uint8_t *)daddr : 0;
	} else if (!daddr) {
		/* if the dest addr isn't provided, we can't choose an
		 * encapsulation! Store the packet type (eg. ETH_P_IP)
		 * for now, and we'll push on a real header when we do
		 * rebuild_header.
		 */
		*(uint16_t *)skb_push(skb, 2) = type;
		/* XXX: Why not use skb->mac_len? */
		if (skb->network_header - skb->mac_header != 2)
			arc_printk(D_NORMAL, dev, "arcnet_header: Yikes! diff (%u) is not 2!\n",
				   skb->network_header - skb->mac_header);
		return -2;	/* return error -- can't transmit yet! */
	} else {
		/* otherwise, we can just add the header as usual. */
		_daddr = *(uint8_t *)daddr;
		proto_num = lp->default_proto[_daddr];
		proto = arc_proto_map[proto_num];
		arc_printk(D_DURING, dev, "building header for %02Xh using protocol '%c'\n",
			   proto_num, proto->suffix);
		if (proto == &arc_proto_null && arc_bcast_proto != proto) {
			arc_printk(D_DURING, dev, "actually, let's use '%c' instead.\n",
				   arc_bcast_proto->suffix);
			proto = arc_bcast_proto;
		}
	}
	return proto->build_header(skb, dev, type, _daddr);
}

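/* The header built by arcnet_header() already sits at skb->data, so the
 * protocol's prepare_tx() can copy it straight into a free card buffer;
 * go_tx(), called from the interrupt handler, actually starts the
 * transmitter.  Oversized packets are only accepted if the protocol can
 * split them via continue_tx().
 */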
/* Called by the kernel in order to transmit a packet. */
netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct archdr *pkt;
	struct arc_rfc1201 *soft;
	struct ArcProto *proto;
	int txbuf;
	unsigned long flags;
	int retval;

	arc_printk(D_DURING, dev,
		   "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
		   lp->hw.status(dev), lp->cur_tx, lp->next_tx, skb->len, skb->protocol);

	pkt = (struct archdr *)skb->data;
	soft = &pkt->soft.rfc1201;
	proto = arc_proto_map[soft->proto];

	arc_printk(D_SKB_SIZE, dev, "skb: transmitting %d bytes to %02X\n",
		   skb->len, pkt->hard.dest);
	if (BUGLVL(D_SKB))
		arcnet_dump_skb(dev, skb, "tx");

	/* fits in one packet? */
	if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
		arc_printk(D_NORMAL, dev, "fixme: packet too large: compensating badly!\n");
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* don't try again */
	}

	/* We're busy transmitting a packet... */
	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);
	lp->hw.intmask(dev, 0);
	if (lp->next_tx == -1)
		txbuf = get_arcbuf(dev);
	else
		txbuf = -1;

	if (txbuf != -1) {
		lp->outgoing.skb = skb;
		if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
		    !proto->ack_tx) {
			/* done right away and we don't want to acknowledge
			 * the packet later - forget about it now
			 */
			dev->stats.tx_bytes += skb->len;
		} else {
			/* do it the 'split' way */
			lp->outgoing.proto = proto;
			lp->outgoing.skb = skb;
			lp->outgoing.pkt = pkt;

			if (proto->continue_tx &&
			    proto->continue_tx(dev, txbuf)) {
				arc_printk(D_NORMAL, dev,
					   "bug! continue_tx finished the first time! (proto='%c')\n",
					   proto->suffix);
			}
		}
		retval = NETDEV_TX_OK;
		lp->next_tx = txbuf;
	} else {
		retval = NETDEV_TX_BUSY;
	}

	arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
		   __FILE__, __LINE__, __func__, lp->hw.status(dev));
	/* make sure we didn't ignore a TX IRQ while we were in here */
	lp->hw.intmask(dev, 0);

	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
	lp->intmask |= TXFREEflag | EXCNAKflag;
	lp->hw.intmask(dev, lp->intmask);
	arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
		   __FILE__, __LINE__, __func__, lp->hw.status(dev));

	arcnet_led_event(dev, ARCNET_LED_EVENT_TX);

	spin_unlock_irqrestore(&lp->lock, flags);
	return retval;		/* no need to try again */
}
EXPORT_SYMBOL(arcnet_send_packet);

/* Actually start transmitting a packet that was loaded into a buffer
 * by prepare_tx.  This should _only_ be called by the interrupt handler.
 */
static int go_tx(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);

	arc_printk(D_DURING, dev, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
		   lp->hw.status(dev), lp->intmask, lp->next_tx, lp->cur_tx);

	if (lp->cur_tx != -1 || lp->next_tx == -1)
		return 0;

	if (BUGLVL(D_TX))
		arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);

	lp->cur_tx = lp->next_tx;
	lp->next_tx = -1;

	/* start sending */
	lp->hw.command(dev, TXcmd | (lp->cur_tx << 3));

	dev->stats.tx_packets++;
	lp->lasttrans_dest = lp->lastload_dest;
	lp->lastload_dest = 0;
	lp->excnak_pending = 0;
	lp->intmask |= TXFREEflag | EXCNAKflag;

	return 1;
}

/* Called by the kernel when transmit times out */
void arcnet_timeout(struct net_device *dev, unsigned int txqueue)
{
	unsigned long flags;
	struct arcnet_local *lp = netdev_priv(dev);
	int status = lp->hw.status(dev);
	char *msg;

	spin_lock_irqsave(&lp->lock, flags);
	if (status & TXFREEflag) {	/* transmit _DID_ finish */
		msg = " - missed IRQ?";
	} else {
		msg = "";
		dev->stats.tx_aborted_errors++;
		lp->timed_out = 1;
		lp->hw.command(dev, NOTXcmd | (lp->cur_tx << 3));
	}
	dev->stats.tx_errors++;

	/* make sure we didn't miss a TX or an EXC NAK IRQ */
	lp->hw.intmask(dev, 0);
	lp->intmask |= TXFREEflag | EXCNAKflag;
	lp->hw.intmask(dev, lp->intmask);

	spin_unlock_irqrestore(&lp->lock, flags);

	if (time_after(jiffies, lp->last_timeout + 10 * HZ)) {
		arc_printk(D_EXTRA, dev, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
			   msg, status, lp->intmask, lp->lasttrans_dest);
		lp->last_timeout = jiffies;
	}

	if (lp->cur_tx == -1)
		netif_wake_queue(dev);
}
EXPORT_SYMBOL(arcnet_timeout);

/* The typical workload of the driver: Handle the network interface
 * interrupts.  Establish which device needs attention, and call the correct
 * chipset interrupt handler.
 */
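/* The handler loops up to five times, checking (in order) for spurious
 * resets, receive completions, excessive-NAK diagnostics, transmit
 * completions and network reconfigurations, and stops once a pass finds
 * nothing to do.
 */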
irqreturn_t arcnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct arcnet_local *lp;
	int recbuf, status, diagstatus, didsomething, boguscount;
	unsigned long flags;
	int retval = IRQ_NONE;

	arc_printk(D_DURING, dev, "\n");

	arc_printk(D_DURING, dev, "in arcnet_interrupt\n");

	lp = netdev_priv(dev);
	BUG_ON(!lp);

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->reset_in_progress)
		goto out;

	/* RESET flag was enabled - if device is not running, we must
	 * clear it right away (but nothing else).
	 */
	if (!netif_running(dev)) {
		if (lp->hw.status(dev) & RESETflag)
			lp->hw.command(dev, CFLAGScmd | RESETclear);
		lp->hw.intmask(dev, 0);
		spin_unlock_irqrestore(&lp->lock, flags);
		return retval;
	}

	arc_printk(D_DURING, dev, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n",
		   lp->hw.status(dev), lp->intmask);

	boguscount = 5;
	do {
		status = lp->hw.status(dev);
		diagstatus = (status >> 8) & 0xFF;

		arc_printk(D_DEBUG, dev, "%s: %d: %s: status=%x\n",
			   __FILE__, __LINE__, __func__, status);
		didsomething = 0;

		/* RESET flag was enabled - card is resetting and if RX is
		 * disabled, it's NOT because we just got a packet.
		 *
		 * The card is in an undefined state.
		 * Clear it out and start over.
		 */
		if (status & RESETflag) {
			arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
				   status);

			lp->reset_in_progress = 1;
			netif_stop_queue(dev);
			netif_carrier_off(dev);
			schedule_work(&lp->reset_work);

			/* get out of the interrupt handler! */
			goto out;
		}
		/* RX is inhibited - we must have received something.
		 * Prepare to receive into the next buffer.
		 *
		 * We don't actually copy the received packet from the card
		 * until after the transmit handler runs (and possibly
		 * launches the next tx); this should improve latency slightly
		 * if we get both types of interrupts at once.
		 */
		recbuf = -1;
		if (status & lp->intmask & NORXflag) {
			recbuf = lp->cur_rx;
			arc_printk(D_DURING, dev, "Buffer #%d: receive irq (status=%Xh)\n",
				   recbuf, status);

			lp->cur_rx = get_arcbuf(dev);
			if (lp->cur_rx != -1) {
				arc_printk(D_DURING, dev, "enabling receive to buffer #%d\n",
					   lp->cur_rx);
				lp->hw.command(dev, RXcmd | (lp->cur_rx << 3) | RXbcasts);
			}
			didsomething++;
		}

		if ((diagstatus & EXCNAKflag)) {
			arc_printk(D_DURING, dev, "EXCNAK IRQ (diagstat=%Xh)\n",
				   diagstatus);

			lp->hw.command(dev, NOTXcmd);	/* disable transmit */
			lp->excnak_pending = 1;

			lp->hw.command(dev, EXCNAKclear);
			lp->intmask &= ~(EXCNAKflag);
			didsomething++;
		}

		/* a transmit finished, and we're interested in it. */
		if ((status & lp->intmask & TXFREEflag) || lp->timed_out) {
			int ackstatus;
			lp->intmask &= ~(TXFREEflag | EXCNAKflag);

			if (status & TXACKflag)
				ackstatus = 2;
			else if (lp->excnak_pending)
				ackstatus = 1;
			else
				ackstatus = 0;

			arc_printk(D_DURING, dev, "TX IRQ (stat=%Xh)\n",
				   status);

			if (lp->cur_tx != -1 && !lp->timed_out) {
				if (!(status & TXACKflag)) {
					if (lp->lasttrans_dest != 0) {
						arc_printk(D_EXTRA, dev,
							   "transmit was not acknowledged! (status=%Xh, dest=%02Xh)\n",
							   status,
							   lp->lasttrans_dest);
						dev->stats.tx_errors++;
						dev->stats.tx_carrier_errors++;
					} else {
						arc_printk(D_DURING, dev,
							   "broadcast was not acknowledged; that's normal (status=%Xh, dest=%02Xh)\n",
							   status,
							   lp->lasttrans_dest);
					}
				}

				if (lp->outgoing.proto &&
				    lp->outgoing.proto->ack_tx) {
					lp->outgoing.proto
						->ack_tx(dev, ackstatus);
				}
				lp->reply_status = ackstatus;
				queue_work(system_bh_highpri_wq, &lp->reply_work);
			}
			if (lp->cur_tx != -1)
				release_arcbuf(dev, lp->cur_tx);

			lp->cur_tx = -1;
			lp->timed_out = 0;
			didsomething++;

			/* send another packet if there is one */
			go_tx(dev);

			/* continue a split packet, if any */
			if (lp->outgoing.proto &&
			    lp->outgoing.proto->continue_tx) {
				int txbuf = get_arcbuf(dev);

				if (txbuf != -1) {
					if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
						/* that was the last segment */
						dev->stats.tx_bytes += lp->outgoing.skb->len;
						if (!lp->outgoing.proto->ack_tx) {
							dev_kfree_skb_irq(lp->outgoing.skb);
							lp->outgoing.proto = NULL;
						}
					}
					lp->next_tx = txbuf;
				}
			}
			/* inform upper layers of idleness, if necessary */
			if (lp->cur_tx == -1)
				netif_wake_queue(dev);
		}
		/* now process the received packet, if any */
		if (recbuf != -1) {
			if (BUGLVL(D_RX))
				arcnet_dump_packet(dev, recbuf, "rx irq", 0);

			arcnet_rx(dev, recbuf);
			release_arcbuf(dev, recbuf);

			didsomething++;
		}
		if (status & lp->intmask & RECONflag) {
			lp->hw.command(dev, CFLAGScmd | CONFIGclear);
			dev->stats.tx_carrier_errors++;

			arc_printk(D_RECON, dev, "Network reconfiguration detected (status=%Xh)\n",
				   status);
			if (netif_carrier_ok(dev)) {
				netif_carrier_off(dev);
				netdev_info(dev, "link down\n");
			}
			mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));

			arcnet_led_event(dev, ARCNET_LED_EVENT_RECON);
			/* MYRECON bit is at bit 7 of diagstatus */
			if (diagstatus & 0x80)
				arc_printk(D_RECON, dev, "Put out that recon myself\n");

			/* is the RECON info empty or old? */
			if (!lp->first_recon || !lp->last_recon ||
			    time_after(jiffies, lp->last_recon + HZ * 10)) {
				if (lp->network_down)
					arc_printk(D_NORMAL, dev, "reconfiguration detected: cabling restored?\n");
				lp->first_recon = lp->last_recon = jiffies;
				lp->num_recons = lp->network_down = 0;

				arc_printk(D_DURING, dev, "recon: clearing counters.\n");
			} else {	/* add to current RECON counter */
				lp->last_recon = jiffies;
				lp->num_recons++;

				arc_printk(D_DURING, dev, "recon: counter=%d, time=%lds, net=%d\n",
					   lp->num_recons,
					   (lp->last_recon - lp->first_recon) / HZ,
					   lp->network_down);

				/* if network is marked up;
				 * and first_recon and last_recon are 60+ apart;
				 * and the average no. of recons counted is
				 * > RECON_THRESHOLD/min;
				 * then print a warning message.
				 */
				if (!lp->network_down &&
				    (lp->last_recon - lp->first_recon) <= HZ * 60 &&
				    lp->num_recons >= RECON_THRESHOLD) {
					lp->network_down = 1;
					arc_printk(D_NORMAL, dev, "many reconfigurations detected: cabling problem?\n");
				} else if (!lp->network_down &&
					   lp->last_recon - lp->first_recon > HZ * 60) {
					/* reset counters if we've gone for
					 * over a minute.
					 */
					lp->first_recon = lp->last_recon;
					lp->num_recons = 1;
				}
			}
		} else if (lp->network_down &&
			   time_after(jiffies, lp->last_recon + HZ * 10)) {
			if (lp->network_down)
				arc_printk(D_NORMAL, dev, "cabling restored?\n");
			lp->first_recon = lp->last_recon = 0;
			lp->num_recons = lp->network_down = 0;

			arc_printk(D_DURING, dev, "not recon: clearing counters anyway.\n");
			netif_carrier_on(dev);
		}

		if (didsomething)
			retval |= IRQ_HANDLED;
	} while (--boguscount && didsomething);

	arc_printk(D_DURING, dev, "arcnet_interrupt complete (status=%Xh, count=%d)\n",
		   lp->hw.status(dev), boguscount);
	arc_printk(D_DURING, dev, "\n");

	lp->hw.intmask(dev, 0);
	udelay(1);
	lp->hw.intmask(dev, lp->intmask);

out:
	spin_unlock_irqrestore(&lp->lock, flags);
	return retval;
}
EXPORT_SYMBOL(arcnet_interrupt);

/* This is a generic packet receiver that calls arcnet??_rx depending on the
 * protocol ID found.
 */
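/* The hardware header stores the payload as an offset into the 512-byte
 * card buffer: a nonzero offset[0] means a short (up to 256-byte) packet,
 * otherwise offset[1] indexes into the full 512-byte buffer.
 */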
static void arcnet_rx(struct net_device *dev, int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	union {
		struct archdr pkt;
		char buf[512];
	} rxdata;
	struct arc_rfc1201 *soft;
	int length, ofs;

	soft = &rxdata.pkt.soft.rfc1201;

	lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
	if (rxdata.pkt.hard.offset[0]) {
		ofs = rxdata.pkt.hard.offset[0];
		length = 256 - ofs;
	} else {
		ofs = rxdata.pkt.hard.offset[1];
		length = 512 - ofs;
	}

	/* get the full header, if possible */
	if (sizeof(rxdata.pkt.soft) <= length) {
		lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
	} else {
		memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
		lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
	}

	arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
		   bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length + ARC_HDR_SIZE;

	/* call the right receiver for the protocol */
	if (arc_proto_map[soft->proto]->is_ip) {
		if (BUGLVL(D_PROTO)) {
			struct ArcProto
			*oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
			*newp = arc_proto_map[soft->proto];

			if (oldp != newp) {
				arc_printk(D_PROTO, dev,
					   "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
					   soft->proto, rxdata.pkt.hard.source,
					   newp->suffix, oldp->suffix);
			}
		}

		/* broadcasts will always be done with the last-used encap. */
		lp->default_proto[0] = soft->proto;

		/* in striking contrast, the following isn't a hack. */
		lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
	}
	/* call the protocol-specific receiver. */
	arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
}

static void null_rx(struct net_device *dev, int bufnum,
		    struct archdr *pkthdr, int length)
{
	arc_printk(D_PROTO, dev,
		   "rx: don't know how to deal with proto %02Xh from host %02Xh.\n",
		   pkthdr->soft.rfc1201.proto, pkthdr->hard.source);
}

static int null_build_header(struct sk_buff *skb, struct net_device *dev,
			     unsigned short type, uint8_t daddr)
{
	struct arcnet_local *lp = netdev_priv(dev);

	arc_printk(D_PROTO, dev,
		   "tx: can't build header for encap %02Xh; load a protocol driver.\n",
		   lp->default_proto[daddr]);

	/* always fails */
	return 0;
}

/* the "do nothing" prepare_tx function warns that there's nothing to do. */
static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
			   int length, int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct arc_hardware newpkt;

	arc_printk(D_PROTO, dev, "tx: no encap for this host; load a protocol driver.\n");

	/* send a packet to myself -- will never get received, of course */
	newpkt.source = newpkt.dest = dev->dev_addr[0];

	/* only one byte of actual data (and it's random) */
	newpkt.offset[0] = 0xFF;

	lp->hw.copy_to_card(dev, bufnum, 0, &newpkt, ARC_HDR_SIZE);

	return 1;	/* done */
}