// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 in-tech smart charging GmbH
 *
 * driver is based on micrel/ks8851_spi.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/spi/spi.h>
#include <linux/of_net.h>

#define MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER)

#define DRV_NAME	"mse102x"

#define DET_CMD		0x0001
#define DET_SOF		0x0002
#define DET_DFT		0x55AA

#define CMD_SHIFT	12
#define CMD_RTS		(0x1 << CMD_SHIFT)
#define CMD_CTR		(0x2 << CMD_SHIFT)

#define CMD_MASK	GENMASK(15, CMD_SHIFT)
#define LEN_MASK	GENMASK(CMD_SHIFT - 1, 0)

#define DET_CMD_LEN	4
#define DET_SOF_LEN	2
#define DET_DFT_LEN	2
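
/* On-the-wire framing, as implied by the definitions above: a command
 * exchange is the 16-bit marker DET_CMD followed by a 16-bit command
 * word (DET_CMD_LEN bytes in total); a data frame is DET_SOF, the
 * Ethernet frame (padded to at least 60 bytes on TX) and the DET_DFT
 * tail. The upper 4 bits of a command word select the command
 * (CMD_MASK), the lower 12 bits carry a frame length (LEN_MASK).
 */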

#define MIN_FREQ_HZ	6000000
#define MAX_FREQ_HZ	7142857

struct mse102x_stats {
	u64 xfer_err;
	u64 invalid_cmd;
	u64 invalid_ctr;
	u64 invalid_dft;
	u64 invalid_len;
	u64 invalid_rts;
	u64 invalid_sof;
	u64 tx_timeout;
};

static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
	"SPI transfer errors",
	"Invalid command",
	"Invalid CTR",
	"Invalid DFT",
	"Invalid frame length",
	"Invalid RTS",
	"Invalid SOF",
	"TX timeout",
};

struct mse102x_net {
	struct net_device	*ndev;

	u8			rxd[8];
	u8			txd[8];

	u32			msg_enable ____cacheline_aligned;

	struct sk_buff_head	txq;
	struct mse102x_stats	stats;
};

struct mse102x_net_spi {
	struct mse102x_net	mse102x;
	struct mutex		lock;		/* Protect SPI frame transfer */
	struct work_struct	tx_work;
	struct spi_device	*spidev;
	struct spi_message	spi_msg;
	struct spi_transfer	spi_xfer;

#ifdef CONFIG_DEBUG_FS
	struct dentry		*device_root;
#endif
};

#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)

#ifdef CONFIG_DEBUG_FS

static int mse102x_info_show(struct seq_file *s, void *what)
{
	struct mse102x_net_spi *mses = s->private;

	seq_printf(s, "TX ring size        : %u\n",
		   skb_queue_len(&mses->mse102x.txq));

	seq_printf(s, "IRQ                 : %d\n",
		   mses->spidev->irq);

	seq_printf(s, "SPI effective speed : %lu\n",
		   (unsigned long)mses->spi_xfer.effective_speed_hz);
	seq_printf(s, "SPI mode            : %x\n",
		   mses->spidev->mode);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(mse102x_info);

static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
{
	mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev),
					       NULL);

	debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses,
			    &mse102x_info_fops);
}

static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
	debugfs_remove_recursive(mses->device_root);
}

#else /* CONFIG_DEBUG_FS */

static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
{
}

static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
}

#endif

/* SPI register read/write calls.
 *
 * All these calls issue SPI transactions to access the chip's registers. They
 * all require that the necessary lock is held to prevent accesses when the
 * chip is busy transferring packet data.
 */

static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd)
{
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
	struct spi_transfer *xfer = &mses->spi_xfer;
	struct spi_message *msg = &mses->spi_msg;
	__be16 txb[2];
	int ret;

	txb[0] = cpu_to_be16(DET_CMD);
	txb[1] = cpu_to_be16(cmd);

	xfer->tx_buf = txb;
	xfer->rx_buf = NULL;
	xfer->len = DET_CMD_LEN;

	ret = spi_sync(mses->spidev, msg);
	if (ret < 0) {
		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
			   __func__, ret);
		mse->stats.xfer_err++;
	}
}

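/* Read a DET_CMD_LEN response while clocking out zeros: the first 16 bits
 * must echo the DET_CMD marker, the remaining 16 bits are the payload
 * copied out to @rxb.
 */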
static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
{
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
	struct spi_transfer *xfer = &mses->spi_xfer;
	struct spi_message *msg = &mses->spi_msg;
	__be16 *txb = (__be16 *)mse->txd;
	__be16 *cmd = (__be16 *)mse->rxd;
	u8 *trx = mse->rxd;
	int ret;

	txb[0] = 0;
	txb[1] = 0;

	xfer->tx_buf = txb;
	xfer->rx_buf = trx;
	xfer->len = DET_CMD_LEN;

	ret = spi_sync(mses->spidev, msg);
	if (ret < 0) {
		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
			   __func__, ret);
		mse->stats.xfer_err++;
	} else if (*cmd != cpu_to_be16(DET_CMD)) {
		net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
				    __func__, *cmd);
		mse->stats.invalid_cmd++;
		ret = -EIO;
	} else {
		memcpy(rxb, trx + 2, 2);
	}

	return ret;
}

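/* Helpers that wrap an outgoing sk_buff into the wire format: DET_SOF
 * goes in front of the Ethernet header, DET_DFT after the (optionally
 * padded) payload.
 */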
static inline void mse102x_push_header(struct sk_buff *skb)
{
	__be16 *header = skb_push(skb, DET_SOF_LEN);

	*header = cpu_to_be16(DET_SOF);
}

static inline void mse102x_put_footer(struct sk_buff *skb)
{
	__be16 *footer = skb_put(skb, DET_DFT_LEN);

	*footer = cpu_to_be16(DET_DFT);
}

static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
				unsigned int pad)
{
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
	struct spi_transfer *xfer = &mses->spi_xfer;
	struct spi_message *msg = &mses->spi_msg;
	struct sk_buff *tskb;
	int ret;

	netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
		  __func__, txp, txp->len, txp->data);

	if ((skb_headroom(txp) < DET_SOF_LEN) ||
	    (skb_tailroom(txp) < DET_DFT_LEN + pad)) {
		tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad,
				       GFP_KERNEL);
		if (!tskb)
			return -ENOMEM;

		dev_kfree_skb(txp);
		txp = tskb;
	}

	mse102x_push_header(txp);

	if (pad)
		skb_put_zero(txp, pad);

	mse102x_put_footer(txp);

	xfer->tx_buf = txp->data;
	xfer->rx_buf = NULL;
	xfer->len = txp->len;

	ret = spi_sync(mses->spidev, msg);
	if (ret < 0) {
		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
			   __func__, ret);
		mse->stats.xfer_err++;
	}

	return ret;
}

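/* Receive one frame in a single transfer: DET_SOF, the payload of
 * @frame_len bytes and the DET_DFT tail are clocked in together into
 * @buff, and both delimiters are validated before the data is used.
 */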
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
				unsigned int frame_len)
{
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
	struct spi_transfer *xfer = &mses->spi_xfer;
	struct spi_message *msg = &mses->spi_msg;
	__be16 *sof = (__be16 *)buff;
	__be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len);
	int ret;

	xfer->rx_buf = buff;
	xfer->tx_buf = NULL;
	xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN;

	ret = spi_sync(mses->spidev, msg);
	if (ret < 0) {
		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
			   __func__, ret);
		mse->stats.xfer_err++;
	} else if (*sof != cpu_to_be16(DET_SOF)) {
		netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
			   __func__, *sof);
		mse->stats.invalid_sof++;
		ret = -EIO;
	} else if (*dft != cpu_to_be16(DET_DFT)) {
		netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n",
			   __func__, *dft);
		mse->stats.invalid_dft++;
		ret = -EIO;
	}

	return ret;
}

static void mse102x_dump_packet(const char *msg, int len, const char *data)
{
	printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len);
	print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       data, len, true);
}

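/* RX path: poll the device with CMD_CTR; a device with a frame pending
 * answers with CMD_RTS plus the frame length in LEN_MASK, which is then
 * fetched via mse102x_rx_frame_spi(). Called from the IRQ thread with
 * the SPI lock held.
 */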
static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
{
	struct sk_buff *skb;
	unsigned int rxalign;
	unsigned int rxlen;
	__be16 rx = 0;
	u16 cmd_resp;
	u8 *rxpkt;
	int ret;

	mse102x_tx_cmd_spi(mse, CMD_CTR);
	ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
	cmd_resp = be16_to_cpu(rx);

	if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
		usleep_range(50, 100);

		mse102x_tx_cmd_spi(mse, CMD_CTR);
		ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
		if (ret)
			return;

		cmd_resp = be16_to_cpu(rx);
		if ((cmd_resp & CMD_MASK) != CMD_RTS) {
			net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
					    __func__, cmd_resp);
			mse->stats.invalid_rts++;
			return;
		}

		net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
				    __func__);
	}

	rxlen = cmd_resp & LEN_MASK;
	if (!rxlen) {
		net_dbg_ratelimited("%s: No frame length defined\n", __func__);
		mse->stats.invalid_len++;
		return;
	}

	rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
	skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
	if (!skb)
		return;

	/* 2 bytes Start of frame (before ethernet header)
	 * 2 bytes Data frame tail (after ethernet frame)
	 * They are copied, but ignored.
	 */
	rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
	if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
		mse->ndev->stats.rx_errors++;
		dev_kfree_skb(skb);
		return;
	}

	if (netif_msg_pktdata(mse))
		mse102x_dump_packet(__func__, skb->len, skb->data);

	skb->protocol = eth_type_trans(skb, mse->ndev);
	netif_rx(skb);

	mse->ndev->stats.rx_packets++;
	mse->ndev->stats.rx_bytes += rxlen;
}

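/* TX handshake: announce the (padded) frame length with CMD_RTS and
 * retry until the device grants the transfer with CMD_CTR, then send
 * the frame itself. Gives up with -ETIMEDOUT once @work_timeout has
 * passed.
 */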
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
			      unsigned long work_timeout)
{
	unsigned int pad = 0;
	__be16 rx = 0;
	u16 cmd_resp;
	int ret;
	bool first = true;

	if (txb->len < 60)
		pad = 60 - txb->len;

	while (1) {
		mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
		ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
		cmd_resp = be16_to_cpu(rx);

		if (!ret) {
			/* ready to send frame ? */
			if (cmd_resp == CMD_CTR)
				break;

			net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
					    __func__, cmd_resp);
			mse->stats.invalid_ctr++;
		}

		/* It's not predictable how long / many retries it takes to
		 * send at least one packet, so TX timeouts are possible.
		 * That's the reason why the netdev watchdog is not used here.
		 */
		if (time_after(jiffies, work_timeout))
			return -ETIMEDOUT;

		if (first) {
			/* throttle at first issue */
			netif_stop_queue(mse->ndev);
			/* fast retry */
			usleep_range(50, 100);
			first = false;
		} else {
			msleep(20);
		}
	}

	ret = mse102x_tx_frame_spi(mse, txb, pad);
	if (ret)
		net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
				    __func__, ret);

	return ret;
}

#define TX_QUEUE_MAX 10

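/* Worker that drains the TX queue: each frame is handed to
 * mse102x_tx_pkt_spi() with the SPI lock held, so TX handshakes and
 * RX polls never interleave on the bus. The netdev queue is woken
 * again once the backlog has been flushed.
 */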
static void mse102x_tx_work(struct work_struct *work)
{
	/* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
	unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
	struct mse102x_net_spi *mses;
	struct mse102x_net *mse;
	struct sk_buff *txb;
	int ret = 0;

	mses = container_of(work, struct mse102x_net_spi, tx_work);
	mse = &mses->mse102x;

	while ((txb = skb_dequeue(&mse->txq))) {
		mutex_lock(&mses->lock);
		ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
		mutex_unlock(&mses->lock);
		if (ret) {
			mse->ndev->stats.tx_dropped++;
		} else {
			mse->ndev->stats.tx_bytes += txb->len;
			mse->ndev->stats.tx_packets++;
		}

		dev_kfree_skb(txb);
	}

	if (ret == -ETIMEDOUT) {
		if (netif_msg_timer(mse))
			netdev_err(mse->ndev, "tx work timeout\n");

		mse->stats.tx_timeout++;
	}

	netif_wake_queue(mse->ndev);
}

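/* ndo_start_xmit: frames are only queued here and sent asynchronously
 * by the worker above; the queue is stopped once TX_QUEUE_MAX frames
 * are outstanding and woken again by the worker.
 */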
static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
					  struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	netif_dbg(mse, tx_queued, ndev,
		  "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);

	skb_queue_tail(&mse->txq, skb);

	if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
		netif_stop_queue(ndev);

	schedule_work(&mses->tx_work);

	return NETDEV_TX_OK;
}

static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
{
	struct net_device *ndev = mse->ndev;
	int ret = of_get_ethdev_address(np, ndev);

	if (ret) {
		eth_hw_addr_random(ndev);
		netdev_err(ndev, "Using random MAC address: %pM\n",
			   ndev->dev_addr);
	}
}

/* Assumption: this is called for every incoming packet */
static irqreturn_t mse102x_irq(int irq, void *_mse)
{
	struct mse102x_net *mse = _mse;
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	mutex_lock(&mses->lock);
	mse102x_rx_pkt_spi(mse);
	mutex_unlock(&mses->lock);

	return IRQ_HANDLED;
}

static int mse102x_net_open(struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	int ret;

	ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
				   ndev->name, mse);
	if (ret < 0) {
		netdev_err(ndev, "Failed to get irq: %d\n", ret);
		return ret;
	}

	netif_dbg(mse, ifup, ndev, "opening\n");

	netif_start_queue(ndev);

	netif_carrier_on(ndev);

	netif_dbg(mse, ifup, ndev, "network device up\n");

	return 0;
}

static int mse102x_net_stop(struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	netif_info(mse, ifdown, ndev, "shutting down\n");

	netif_carrier_off(mse->ndev);

	/* stop any outstanding work */
	flush_work(&mses->tx_work);

	netif_stop_queue(ndev);

	skb_queue_purge(&mse->txq);

	free_irq(ndev->irq, mse);

	return 0;
}

static const struct net_device_ops mse102x_netdev_ops = {
	.ndo_open		= mse102x_net_open,
	.ndo_stop		= mse102x_net_stop,
	.ndo_start_xmit		= mse102x_start_xmit_spi,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* ethtool support */

static void mse102x_get_drvinfo(struct net_device *ndev,
				struct ethtool_drvinfo *di)
{
	strscpy(di->driver, DRV_NAME, sizeof(di->driver));
	strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
}

static u32 mse102x_get_msglevel(struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);

	return mse->msg_enable;
}

static void mse102x_set_msglevel(struct net_device *ndev, u32 to)
{
	struct mse102x_net *mse = netdev_priv(ndev);

	mse->msg_enable = to;
}

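/* Copies struct mse102x_stats verbatim, so its u64 members must stay
 * in the same order as the strings in mse102x_gstrings_stats.
 */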
static void mse102x_get_ethtool_stats(struct net_device *ndev,
				      struct ethtool_stats *estats, u64 *data)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_stats *st = &mse->stats;

	memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64));
}

static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &mse102x_gstrings_stats,
		       sizeof(mse102x_gstrings_stats));
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static int mse102x_get_sset_count(struct net_device *ndev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mse102x_gstrings_stats);
	default:
		return -EINVAL;
	}
}

static const struct ethtool_ops mse102x_ethtool_ops = {
	.get_drvinfo		= mse102x_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= mse102x_get_msglevel,
	.set_msglevel		= mse102x_set_msglevel,
	.get_ethtool_stats	= mse102x_get_ethtool_stats,
	.get_strings		= mse102x_get_strings,
	.get_sset_count		= mse102x_get_sset_count,
};

/* driver bus management functions */

#ifdef CONFIG_PM_SLEEP

static int mse102x_suspend(struct device *dev)
{
	struct mse102x_net *mse = dev_get_drvdata(dev);
	struct net_device *ndev = mse->ndev;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		mse102x_net_stop(ndev);
	}

	return 0;
}

static int mse102x_resume(struct device *dev)
{
	struct mse102x_net *mse = dev_get_drvdata(dev);
	struct net_device *ndev = mse->ndev;

	if (netif_running(ndev)) {
		mse102x_net_open(ndev);
		netif_device_attach(ndev);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);

static int mse102x_probe_spi(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct mse102x_net_spi *mses;
	struct net_device *ndev;
	struct mse102x_net *mse;
	int ret;

	spi->bits_per_word = 8;
	spi->mode |= SPI_MODE_3;
	/* enforce minimum speed to ensure device functionality */
	spi->controller->min_speed_hz = MIN_FREQ_HZ;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = MAX_FREQ_HZ;

	if (spi->max_speed_hz < MIN_FREQ_HZ ||
	    spi->max_speed_hz > MAX_FREQ_HZ) {
		dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n",
			MIN_FREQ_HZ, MAX_FREQ_HZ);
		return -EINVAL;
	}

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret);
		return ret;
	}

	ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi));
	if (!ndev)
		return -ENOMEM;

	ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4);
	ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4);
	ndev->priv_flags &= ~IFF_TX_SKB_SHARING;
	ndev->tx_queue_len = 100;

	mse = netdev_priv(ndev);
	mses = to_mse102x_spi(mse);

	mses->spidev = spi;
	mutex_init(&mses->lock);
	INIT_WORK(&mses->tx_work, mse102x_tx_work);

	/* initialise pre-made spi transfer messages */
	spi_message_init(&mses->spi_msg);
	spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);

	ndev->irq = spi->irq;
	mse->ndev = ndev;

	/* set the default message enable */
	mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT);

	skb_queue_head_init(&mse->txq);

	SET_NETDEV_DEV(ndev, dev);

	dev_set_drvdata(dev, mse);

	netif_carrier_off(mse->ndev);
	ndev->netdev_ops = &mse102x_netdev_ops;
	ndev->ethtool_ops = &mse102x_ethtool_ops;

	mse102x_init_mac(mse, dev->of_node);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register network device: %d\n", ret);
		return ret;
	}

	mse102x_init_device_debugfs(mses);

	return 0;
}

static void mse102x_remove_spi(struct spi_device *spi)
{
	struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	if (netif_msg_drv(mse))
		dev_info(&spi->dev, "remove\n");

	mse102x_remove_device_debugfs(mses);
	unregister_netdev(mse->ndev);
}

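/* Illustrative device tree node for binding this driver; the values
 * below are an example only, see the vertexcom DT binding for the
 * canonical schema:
 *
 *	ethernet@0 {
 *		compatible = "vertexcom,mse1021";
 *		reg = <0>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <23 IRQ_TYPE_EDGE_RISING>;
 *		spi-max-frequency = <7142857>;
 *	};
 */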
static const struct of_device_id mse102x_match_table[] = {
	{ .compatible = "vertexcom,mse1021" },
	{ .compatible = "vertexcom,mse1022" },
	{ }
};
MODULE_DEVICE_TABLE(of, mse102x_match_table);

static const struct spi_device_id mse102x_ids[] = {
	{ "mse1021" },
	{ "mse1022" },
	{ }
};
MODULE_DEVICE_TABLE(spi, mse102x_ids);

static struct spi_driver mse102x_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mse102x_match_table,
		.pm = &mse102x_pm_ops,
	},
	.probe = mse102x_probe_spi,
	.remove = mse102x_remove_spi,
	.id_table = mse102x_ids,
};
module_spi_driver(mse102x_driver);

MODULE_DESCRIPTION("MSE102x Network driver");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@chargebyte.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);