// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 * Copyright (c) 2014, I2SE GmbH
 */

/* This module implements the Qualcomm Atheros SPI protocol for
 * kernel-based SPI devices; it is essentially an Ethernet-to-SPI
 * serial converter.
 */

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "qca_7k.h"
#include "qca_7k_common.h"
#include "qca_debug.h"
#include "qca_spi.h"

#define MAX_DMA_BURST_LEN 5000

#define SPI_INTR 0
#define SPI_RESET 1

/* Module parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
#define QCASPI_CLK_SPEED 8000000
static int qcaspi_clkspeed;
module_param(qcaspi_clkspeed, int, 0);
MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000.");

#define QCASPI_BURST_LEN_MIN 1
#define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN
static int qcaspi_burst_len = MAX_DMA_BURST_LEN;
module_param(qcaspi_burst_len, int, 0);
MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000.");

#define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1
static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");

#define QCASPI_WRITE_VERIFY_MIN 0
#define QCASPI_WRITE_VERIFY_MAX 3
static int wr_verify = QCASPI_WRITE_VERIFY_MIN;
module_param(wr_verify, int, 0);
MODULE_PARM_DESC(wr_verify, "SPI register write verify retries. Use 0-3.");

#define QCASPI_TX_TIMEOUT (1 * HZ)
#define QCASPI_QCA7K_REBOOT_TIME_MS 1000

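/* Disable all QCA7000 interrupt sources and fetch the pending
 * interrupt cause; end_spi_intr_handling() acks the cause and
 * re-enables the sources once the cause has been serviced.
 */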
static void
start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause)
{
	*intr_cause = 0;

	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
	qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
	netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause);
}

static void
end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause)
{
	u16 intr_enable = (SPI_INT_CPU_ON |
			   SPI_INT_PKT_AVLBL |
			   SPI_INT_RDBUF_ERR |
			   SPI_INT_WRBUF_ERR);

	qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0);
	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify);
	netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause);
}

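/* Write a buffer to the QCA7000 write buffer in a single SPI
 * message: a 16-bit "external write" command word followed by the
 * payload. Returns the number of payload bytes written, or 0 on
 * SPI error.
 */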
static u32
qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
{
	__be16 cmd;
	struct spi_message msg;
	struct spi_transfer transfer[2];
	int ret;

	memset(&transfer, 0, sizeof(transfer));
	spi_message_init(&msg);

	cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
	transfer[0].tx_buf = &cmd;
	transfer[0].len = QCASPI_CMD_LEN;
	transfer[1].tx_buf = src;
	transfer[1].len = len;

	spi_message_add_tail(&transfer[0], &msg);
	spi_message_add_tail(&transfer[1], &msg);
	ret = spi_sync(qca->spi_dev, &msg);

	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
		qcaspi_spi_error(qca);
		return 0;
	}

	return len;
}

static u32
qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
{
	struct spi_message msg;
	struct spi_transfer transfer;
	int ret;

	memset(&transfer, 0, sizeof(transfer));
	spi_message_init(&msg);

	transfer.tx_buf = src;
	transfer.len = len;

	spi_message_add_tail(&transfer, &msg);
	ret = spi_sync(qca->spi_dev, &msg);

	if (ret || (msg.actual_length != len)) {
		qcaspi_spi_error(qca);
		return 0;
	}

	return len;
}

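/* Read a buffer from the QCA7000 read buffer in a single SPI
 * message: a 16-bit "external read" command word followed by the
 * RX payload. Returns the number of payload bytes read, or 0 on
 * SPI error.
 */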
static u32
qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
{
	struct spi_message msg;
	__be16 cmd;
	struct spi_transfer transfer[2];
	int ret;

	memset(&transfer, 0, sizeof(transfer));
	spi_message_init(&msg);

	cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
	transfer[0].tx_buf = &cmd;
	transfer[0].len = QCASPI_CMD_LEN;
	transfer[1].rx_buf = dst;
	transfer[1].len = len;

	spi_message_add_tail(&transfer[0], &msg);
	spi_message_add_tail(&transfer[1], &msg);
	ret = spi_sync(qca->spi_dev, &msg);

	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
		qcaspi_spi_error(qca);
		return 0;
	}

	return len;
}

static u32
qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
{
	struct spi_message msg;
	struct spi_transfer transfer;
	int ret;

	memset(&transfer, 0, sizeof(transfer));
	spi_message_init(&msg);

	transfer.rx_buf = dst;
	transfer.len = len;

	spi_message_add_tail(&transfer, &msg);
	ret = spi_sync(qca->spi_dev, &msg);

	if (ret || (msg.actual_length != len)) {
		qcaspi_spi_error(qca);
		return 0;
	}

	return len;
}

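/* Send a bare 16-bit command word to the QCA7000. Used in legacy
 * mode, where the command word and the data are sent as separate
 * SPI messages.
 */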
static int
qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
{
	__be16 tx_data;
	struct spi_message msg;
	struct spi_transfer transfer;
	int ret;

	memset(&transfer, 0, sizeof(transfer));

	spi_message_init(&msg);

	tx_data = cpu_to_be16(cmd);
	transfer.len = sizeof(cmd);
	transfer.tx_buf = &tx_data;
	spi_message_add_tail(&transfer, &msg);

	ret = spi_sync(qca->spi_dev, &msg);

	if (!ret)
		ret = msg.status;

	if (ret)
		qcaspi_spi_error(qca);

	return ret;
}

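/* Transmit a single frame to the QCA7000: announce its length via
 * SPI_REG_BFR_SIZE, then push the skb data in chunks of at most
 * qca->burst_len bytes. Returns 0 on success, -1 on a short write.
 */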
static int
qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
{
	u32 count;
	u32 written;
	u32 offset;
	u32 len;

	len = skb->len;

	qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify);
	if (qca->legacy_mode)
		qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);

	offset = 0;
	while (len) {
		count = len;
		if (count > qca->burst_len)
			count = qca->burst_len;

		if (qca->legacy_mode) {
			written = qcaspi_write_legacy(qca,
						      skb->data + offset,
						      count);
		} else {
			written = qcaspi_write_burst(qca,
						     skb->data + offset,
						     count);
		}

		if (written != count)
			return -1;

		offset += count;
		len -= count;
	}

	return 0;
}

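/* Drain the tx ring: send queued frames as long as the QCA7000
 * write buffer reports enough free space, update the statistics
 * and wake the netdev queue once ring entries have been freed.
 */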
static int
qcaspi_transmit(struct qcaspi *qca)
{
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	u16 available = 0;
	u32 pkt_len;
	u16 new_head;
	u16 packets = 0;

	if (qca->txr.skb[qca->txr.head] == NULL)
		return 0;

	qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);

	if (available > QCASPI_HW_BUF_LEN) {
		/* This could only happen due to interference on the
		 * SPI line. So retry later ...
		 */
		qca->stats.buf_avail_err++;
		return -1;
	}

	while (qca->txr.skb[qca->txr.head]) {
		pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;

		if (available < pkt_len) {
			if (packets == 0)
				qca->stats.write_buf_miss++;
			break;
		}

		if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {
			qca->stats.write_err++;
			return -1;
		}

		packets++;
		n_stats->tx_packets++;
		n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;
		available -= pkt_len;

		/* remove the skb from the queue */
		/* XXX After inconsistent lock states netif_tx_lock()
		 * has been replaced by netif_tx_lock_bh() and so on.
		 */
		netif_tx_lock_bh(qca->net_dev);
		dev_kfree_skb(qca->txr.skb[qca->txr.head]);
		qca->txr.skb[qca->txr.head] = NULL;
		qca->txr.size -= pkt_len;
		new_head = qca->txr.head + 1;
		if (new_head >= qca->txr.count)
			new_head = 0;
		qca->txr.head = new_head;
		if (netif_queue_stopped(qca->net_dev))
			netif_wake_queue(qca->net_dev);
		netif_tx_unlock_bh(qca->net_dev);
	}

	return 0;
}

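/* Receive path: read the number of bytes the QCA7000 has buffered,
 * pull them in via burst or legacy reads and feed every byte into
 * the framing state machine, which re-assembles Ethernet frames
 * and hands them to the network stack.
 */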
static int
qcaspi_receive(struct qcaspi *qca)
{
	struct net_device *net_dev = qca->net_dev;
	struct net_device_stats *n_stats = &net_dev->stats;
	u16 available = 0;
	u32 bytes_read;
	u8 *cp;

	/* Allocate rx SKB if we don't have one available. */
	if (!qca->rx_skb) {
		qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
							net_dev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			netdev_dbg(net_dev, "out of RX resources\n");
			qca->stats.out_of_mem++;
			return -1;
		}
	}

	/* Read the packet size. */
	qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);

	netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %04x\n",
		   available);

	if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
		/* This could only happen due to interference on the
		 * SPI line. So retry later ...
		 */
		qca->stats.buf_avail_err++;
		return -1;
	} else if (available == 0) {
		netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
		return -1;
	}

	qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify);

	if (qca->legacy_mode)
		qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);

	while (available) {
		u32 count = available;

		if (count > qca->burst_len)
			count = qca->burst_len;

		if (qca->legacy_mode) {
			bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer,
							count);
		} else {
			bytes_read = qcaspi_read_burst(qca, qca->rx_buffer,
						       count);
		}

		netdev_dbg(net_dev, "available: %d, bytes read: %d\n",
			   available, bytes_read);

		if (bytes_read) {
			available -= bytes_read;
		} else {
			qca->stats.read_err++;
			return -1;
		}

		cp = qca->rx_buffer;

		while ((bytes_read--) && (qca->rx_skb)) {
			s32 retcode;

			retcode = qcafrm_fsm_decode(&qca->frm_handle,
						    qca->rx_skb->data,
						    skb_tailroom(qca->rx_skb),
						    *cp);
			cp++;
			switch (retcode) {
			case QCAFRM_GATHER:
			case QCAFRM_NOHEAD:
				break;
			case QCAFRM_NOTAIL:
				netdev_dbg(net_dev, "no RX tail\n");
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				break;
			case QCAFRM_INVLEN:
				netdev_dbg(net_dev, "invalid RX length\n");
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				break;
			default:
				qca->rx_skb->dev = qca->net_dev;
				n_stats->rx_packets++;
				n_stats->rx_bytes += retcode;
				skb_put(qca->rx_skb, retcode);
				qca->rx_skb->protocol = eth_type_trans(
					qca->rx_skb, qca->rx_skb->dev);
				skb_checksum_none_assert(qca->rx_skb);
				netif_rx(qca->rx_skb);
				qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
					net_dev->mtu + VLAN_ETH_HLEN);
				if (!qca->rx_skb) {
					netdev_dbg(net_dev, "out of RX resources\n");
					n_stats->rx_errors++;
					qca->stats.out_of_mem++;
					break;
				}
			}
		}
	}

	return 0;
}

/* Check that the tx ring stores only as many bytes as
 * fit into the internal QCA7000 buffer.
 */

static int
qcaspi_tx_ring_has_space(struct tx_ring *txr)
{
	if (txr->skb[txr->tail])
		return 0;

	return (txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
}

/* Flush the tx ring. This function is only safe to
 * call from the qcaspi_spi_thread.
 */

static void
qcaspi_flush_tx_ring(struct qcaspi *qca)
{
	int i;

	/* XXX After inconsistent lock states netif_tx_lock()
	 * has been replaced by netif_tx_lock_bh() and so on.
	 */
	netif_tx_lock_bh(qca->net_dev);
	for (i = 0; i < QCASPI_TX_RING_MAX_LEN; i++) {
		if (qca->txr.skb[i]) {
			dev_kfree_skb(qca->txr.skb[i]);
			qca->txr.skb[i] = NULL;
			qca->net_dev->stats.tx_dropped++;
		}
	}
	qca->txr.tail = 0;
	qca->txr.head = 0;
	qca->txr.size = 0;
	netif_tx_unlock_bh(qca->net_dev);
}

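/* Synchronization state machine for the QCA7000. Verifies the
 * signature register, triggers a slave reset via SPI_REG_SPI_CONFIG
 * if the device looks out of sync, and waits for the CPU-on event
 * before declaring the link ready again.
 */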
static void
qcaspi_qca7k_sync(struct qcaspi *qca, int event)
{
	u16 signature = 0;
	u16 spi_config;
	u16 wrbuf_space = 0;

	if (event == QCASPI_EVENT_CPUON) {
		/* Read signature twice, if not valid
		 * go back to unknown state.
		 */
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		if (signature != QCASPI_GOOD_SIGNATURE) {
			if (qca->sync == QCASPI_SYNC_READY)
				qca->stats.bad_signature++;

			set_bit(SPI_RESET, &qca->flags);
			netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
			return;
		} else {
			/* ensure that the WRBUF is empty */
			qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA,
					     &wrbuf_space);
			if (wrbuf_space != QCASPI_HW_BUF_LEN) {
				netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n");
				qca->sync = QCASPI_SYNC_UNKNOWN;
				qca->stats.buf_avail_err++;
			} else {
				netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n");
				qca->sync = QCASPI_SYNC_READY;
				return;
			}
		}
	} else {
		/* Handle reset only on QCASPI_EVENT_UPDATE */
		if (test_and_clear_bit(SPI_RESET, &qca->flags))
			qca->sync = QCASPI_SYNC_UNKNOWN;
	}

	switch (qca->sync) {
	case QCASPI_SYNC_READY:
		/* Check signature twice, if not valid go to unknown state. */
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		if (signature != QCASPI_GOOD_SIGNATURE)
			qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);

		if (signature != QCASPI_GOOD_SIGNATURE) {
			set_bit(SPI_RESET, &qca->flags);
			qca->stats.bad_signature++;
			netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
			/* don't reset right away */
			return;
		}
		break;
	case QCASPI_SYNC_UNKNOWN:
		/* Read signature, if not valid stay in unknown state */
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		if (signature != QCASPI_GOOD_SIGNATURE) {
			netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n");
			return;
		}

		/* TODO: use GPIO to reset QCA7000 in legacy mode */
		netdev_dbg(qca->net_dev, "sync: resetting device.\n");
		qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config);
		spi_config |= QCASPI_SLAVE_RESET_BIT;
		qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0);

		qca->sync = QCASPI_SYNC_RESET;
		qca->stats.trig_reset++;
		qca->reset_count = 0;
		break;
	case QCASPI_SYNC_RESET:
		qca->reset_count++;
		netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
			   qca->reset_count);
		if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
			/* reset did not seem to take place, try again */
			set_bit(SPI_RESET, &qca->flags);
			qca->stats.reset_timeout++;
			netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n");
		}
		break;
	}
}

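/* Main worker thread: sleeps until an interrupt or pending TX work
 * arrives, keeps the device in sync, services QCA7000 interrupts
 * (CPU on, buffer errors, packet available) and drains the tx ring.
 */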
static int
qcaspi_spi_thread(void *data)
{
	struct qcaspi *qca = data;
	u16 intr_cause = 0;

	netdev_info(qca->net_dev, "SPI thread created\n");
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park()) {
			netif_tx_disable(qca->net_dev);
			netif_carrier_off(qca->net_dev);
			qcaspi_flush_tx_ring(qca);
			kthread_parkme();
			if (qca->sync == QCASPI_SYNC_READY) {
				netif_carrier_on(qca->net_dev);
				netif_wake_queue(qca->net_dev);
			}
			continue;
		}

		if (!qca->flags &&
		    !qca->txr.skb[qca->txr.head])
			schedule();

		set_current_state(TASK_RUNNING);

		netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
			   qca->flags,
			   qca->txr.skb[qca->txr.head]);

		qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);

		if (qca->sync != QCASPI_SYNC_READY) {
			netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n",
				   (unsigned int)qca->sync);
			netif_stop_queue(qca->net_dev);
			netif_carrier_off(qca->net_dev);
			qcaspi_flush_tx_ring(qca);
			msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
		}

		if (test_and_clear_bit(SPI_INTR, &qca->flags)) {
			start_spi_intr_handling(qca, &intr_cause);

			if (intr_cause & SPI_INT_CPU_ON) {
				qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);

				/* Frame decoding in progress */
				if (qca->frm_handle.state != qca->frm_handle.init)
					qca->net_dev->stats.rx_dropped++;

				qcafrm_fsm_init_spi(&qca->frm_handle);
				qca->stats.device_reset++;

				/* not synced. */
				if (qca->sync != QCASPI_SYNC_READY)
					continue;

				netif_wake_queue(qca->net_dev);
				netif_carrier_on(qca->net_dev);
			}

			if (intr_cause & SPI_INT_RDBUF_ERR) {
				/* restart sync */
				netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
				qca->stats.read_buf_err++;
				set_bit(SPI_RESET, &qca->flags);
				continue;
			}

			if (intr_cause & SPI_INT_WRBUF_ERR) {
				/* restart sync */
				netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
				qca->stats.write_buf_err++;
				set_bit(SPI_RESET, &qca->flags);
				continue;
			}

			/* can only handle other interrupts
			 * if sync has occurred
			 */
			if (qca->sync == QCASPI_SYNC_READY) {
				if (intr_cause & SPI_INT_PKT_AVLBL)
					qcaspi_receive(qca);
			}

			end_spi_intr_handling(qca, intr_cause);
		}

		if (qca->sync == QCASPI_SYNC_READY)
			qcaspi_transmit(qca);
	}
	set_current_state(TASK_RUNNING);
	netdev_info(qca->net_dev, "SPI thread exit\n");

	return 0;
}

static irqreturn_t
qcaspi_intr_handler(int irq, void *data)
{
	struct qcaspi *qca = data;

	set_bit(SPI_INTR, &qca->flags);
	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);

	return IRQ_HANDLED;
}

static int
qcaspi_netdev_open(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);
	struct task_struct *thread;

	if (!qca)
		return -EINVAL;

	set_bit(SPI_INTR, &qca->flags);
	qca->sync = QCASPI_SYNC_UNKNOWN;
	qcafrm_fsm_init_spi(&qca->frm_handle);

	thread = kthread_run((void *)qcaspi_spi_thread,
			     qca, "%s", dev->name);

	if (IS_ERR(thread)) {
		netdev_err(dev, "%s: unable to start kernel thread.\n",
			   QCASPI_DRV_NAME);
		return PTR_ERR(thread);
	}

	qca->spi_thread = thread;

	enable_irq(qca->spi_dev->irq);

	/* SPI thread takes care of TX queue */

	return 0;
}

static int
qcaspi_netdev_close(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	netif_stop_queue(dev);

	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
	disable_irq(qca->spi_dev->irq);

	if (qca->spi_thread) {
		kthread_stop(qca->spi_thread);
		qca->spi_thread = NULL;
	}
	qcaspi_flush_tx_ring(qca);

	return 0;
}

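/* ndo_start_xmit handler: pad the frame to the minimum length if
 * needed, wrap it with the QCA7000 framing header and footer and
 * queue it on the tx ring; the SPI thread does the actual transfer.
 */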
static netdev_tx_t
qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 frame_len;
	u8 *ptmp;
	struct qcaspi *qca = netdev_priv(dev);
	u16 new_tail;
	struct sk_buff *tskb;
	u8 pad_len = 0;

	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	if (qca->txr.skb[qca->txr.tail]) {
		netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
		netif_stop_queue(qca->net_dev);
		qca->stats.ring_full++;
		return NETDEV_TX_BUSY;
	}

	if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) ||
	    (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) {
		tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN,
				       QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC);
		if (!tskb) {
			qca->stats.out_of_mem++;
			return NETDEV_TX_BUSY;
		}
		dev_kfree_skb(skb);
		skb = tskb;
	}

	frame_len = skb->len + pad_len;

	ptmp = skb_push(skb, QCAFRM_HEADER_LEN);
	qcafrm_create_header(ptmp, frame_len);

	if (pad_len) {
		ptmp = skb_put_zero(skb, pad_len);
	}

	ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
	qcafrm_create_footer(ptmp);

	netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n",
		   skb->len);

	qca->txr.size += skb->len + QCASPI_HW_PKT_LEN;

	new_tail = qca->txr.tail + 1;
	if (new_tail >= qca->txr.count)
		new_tail = 0;

	qca->txr.skb[qca->txr.tail] = skb;
	qca->txr.tail = new_tail;

	if (!qcaspi_tx_ring_has_space(&qca->txr)) {
		netif_stop_queue(qca->net_dev);
		qca->stats.ring_full++;
	}

	netif_trans_update(dev);

	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);

	return NETDEV_TX_OK;
}

static void
qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qcaspi *qca = netdev_priv(dev);

	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
		    jiffies, jiffies - dev_trans_start(dev));
	qca->net_dev->stats.tx_errors++;
	/* Trigger tx queue flush and QCA7000 reset */
	set_bit(SPI_RESET, &qca->flags);

	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);
}

static int
qcaspi_netdev_init(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	dev->mtu = QCAFRM_MAX_MTU;
	dev->type = ARPHRD_ETHER;
	qca->burst_len = qcaspi_burst_len;
	qca->spi_thread = NULL;
	qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
			    QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN) * QCASPI_RX_MAX_FRAMES;

	memset(&qca->stats, 0, sizeof(struct qcaspi_stats));

	qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL);
	if (!qca->rx_buffer)
		return -ENOBUFS;

	qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb) {
		kfree(qca->rx_buffer);
		netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
		return -ENOBUFS;
	}

	return 0;
}

static void
qcaspi_netdev_uninit(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	kfree(qca->rx_buffer);
	qca->buffer_size = 0;
	dev_kfree_skb(qca->rx_skb);
}

static const struct net_device_ops qcaspi_netdev_ops = {
	.ndo_init = qcaspi_netdev_init,
	.ndo_uninit = qcaspi_netdev_uninit,
	.ndo_open = qcaspi_netdev_open,
	.ndo_stop = qcaspi_netdev_close,
	.ndo_start_xmit = qcaspi_netdev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = qcaspi_netdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};

static void
qcaspi_netdev_setup(struct net_device *dev)
{
	struct qcaspi *qca = NULL;

	dev->netdev_ops = &qcaspi_netdev_ops;
	qcaspi_set_ethtool_ops(dev);
	dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->needed_tailroom = ALIGN(QCAFRM_FOOTER_LEN + QCAFRM_MIN_LEN, 4);
	dev->needed_headroom = ALIGN(QCAFRM_HEADER_LEN, 4);
	dev->tx_queue_len = 100;

	/* MTU range: 46 - 1500 */
	dev->min_mtu = QCAFRM_MIN_MTU;
	dev->max_mtu = QCAFRM_MAX_MTU;

	qca = netdev_priv(dev);
	memset(qca, 0, sizeof(struct qcaspi));

	memset(&qca->txr, 0, sizeof(qca->txr));
	qca->txr.count = QCASPI_TX_RING_MAX_LEN;
}

static const struct of_device_id qca_spi_of_match[] = {
	{ .compatible = "qca,qca7000" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_spi_of_match);

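/* Probe: validate module parameters and device tree data, configure
 * the SPI bus, allocate and set up the net_device, request the IRQ,
 * optionally check the QCA7000 signature and register the netdev.
 */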
static int
qca_spi_probe(struct spi_device *spi)
{
	struct qcaspi *qca = NULL;
	struct net_device *qcaspi_devs = NULL;
	u8 legacy_mode = 0;
	u16 signature;
	int ret;

	if (!spi->dev.of_node) {
		dev_err(&spi->dev, "Missing device tree\n");
		return -EINVAL;
	}

	legacy_mode = of_property_read_bool(spi->dev.of_node,
					    "qca,legacy-mode");

	if (qcaspi_clkspeed)
		spi->max_speed_hz = qcaspi_clkspeed;
	else if (!spi->max_speed_hz)
		spi->max_speed_hz = QCASPI_CLK_SPEED;

	if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
	    spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
		dev_err(&spi->dev, "Invalid clkspeed: %u\n",
			spi->max_speed_hz);
		return -EINVAL;
	}

	if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
	    (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
		dev_err(&spi->dev, "Invalid burst len: %d\n",
			qcaspi_burst_len);
		return -EINVAL;
	}

	if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
	    (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
		dev_err(&spi->dev, "Invalid pluggable: %d\n",
			qcaspi_pluggable);
		return -EINVAL;
	}

	if (wr_verify < QCASPI_WRITE_VERIFY_MIN ||
	    wr_verify > QCASPI_WRITE_VERIFY_MAX) {
		dev_err(&spi->dev, "Invalid write verify: %d\n",
			wr_verify);
		return -EINVAL;
	}

	dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
		 QCASPI_DRV_VERSION,
		 spi->max_speed_hz,
		 qcaspi_burst_len,
		 qcaspi_pluggable);

	spi->mode = SPI_MODE_3;
	if (spi_setup(spi) < 0) {
		dev_err(&spi->dev, "Unable to setup SPI device\n");
		return -EFAULT;
	}

	qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi));
	if (!qcaspi_devs)
		return -ENOMEM;

	qcaspi_netdev_setup(qcaspi_devs);
	SET_NETDEV_DEV(qcaspi_devs, &spi->dev);

	qca = netdev_priv(qcaspi_devs);
	if (!qca) {
		free_netdev(qcaspi_devs);
		dev_err(&spi->dev, "Failed to retrieve private structure\n");
		return -ENOMEM;
	}
	qca->net_dev = qcaspi_devs;
	qca->spi_dev = spi;
	qca->legacy_mode = legacy_mode;

	spi_set_drvdata(spi, qcaspi_devs);

	ret = devm_request_irq(&spi->dev, spi->irq, qcaspi_intr_handler,
			       IRQF_NO_AUTOEN, qca->net_dev->name, qca);
	if (ret) {
		dev_err(&spi->dev, "Unable to get IRQ %d (irqval=%d).\n",
			spi->irq, ret);
		free_netdev(qcaspi_devs);
		return ret;
	}

	ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
	if (ret) {
		eth_hw_addr_random(qca->net_dev);
		dev_info(&spi->dev, "Using random MAC address: %pM\n",
			 qca->net_dev->dev_addr);
	}

	netif_carrier_off(qca->net_dev);

	if (!qcaspi_pluggable) {
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);

		if (signature != QCASPI_GOOD_SIGNATURE) {
			dev_err(&spi->dev, "Invalid signature (expected 0x%04x, read 0x%04x)\n",
				QCASPI_GOOD_SIGNATURE, signature);
			free_netdev(qcaspi_devs);
			return -EFAULT;
		}
	}

	if (register_netdev(qcaspi_devs)) {
		dev_err(&spi->dev, "Unable to register net device %s\n",
			qcaspi_devs->name);
		free_netdev(qcaspi_devs);
		return -EFAULT;
	}

	qcaspi_init_device_debugfs(qca);

	return 0;
}

static void
qca_spi_remove(struct spi_device *spi)
{
	struct net_device *qcaspi_devs = spi_get_drvdata(spi);
	struct qcaspi *qca = netdev_priv(qcaspi_devs);

	qcaspi_remove_device_debugfs(qca);

	unregister_netdev(qcaspi_devs);
	free_netdev(qcaspi_devs);
}

static const struct spi_device_id qca_spi_id[] = {
	{ "qca7000", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, qca_spi_id);

static struct spi_driver qca_spi_driver = {
	.driver = {
		.name = QCASPI_DRV_NAME,
		.of_match_table = qca_spi_of_match,
	},
	.id_table = qca_spi_id,
	.probe = qca_spi_probe,
	.remove = qca_spi_remove,
};
module_spi_driver(qca_spi_driver);

MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCASPI_DRV_VERSION);