1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause |
2 | /* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. |
3 | * Parts of this driver are based on the following: |
4 | * - Kvaser linux pciefd driver (version 5.42) |
5 | * - PEAK linux canfd driver |
6 | */ |
7 | |
8 | #include <linux/bitfield.h> |
9 | #include <linux/can/dev.h> |
10 | #include <linux/device.h> |
11 | #include <linux/ethtool.h> |
12 | #include <linux/iopoll.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/minmax.h> |
15 | #include <linux/module.h> |
16 | #include <linux/netdevice.h> |
17 | #include <linux/pci.h> |
18 | #include <linux/timer.h> |
19 | |
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
23 | |
24 | #define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" |
25 | |
26 | #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) |
27 | #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) |
28 | #define KVASER_PCIEFD_MAX_ERR_REP 256U |
29 | #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U |
30 | #define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL |
31 | #define KVASER_PCIEFD_DMA_COUNT 2U |
32 | |
33 | #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) |
34 | |
35 | #define KVASER_PCIEFD_VENDOR 0x1a07 |
36 | /* Altera based devices */ |
37 | #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d |
38 | #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e |
39 | #define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f |
40 | #define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010 |
41 | #define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011 |
42 | |
43 | /* SmartFusion2 based devices */ |
44 | #define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012 |
45 | #define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013 |
46 | #define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014 |
47 | #define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015 |
48 | #define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016 |
49 | |
50 | /* Xilinx based devices */ |
51 | #define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017 |
52 | #define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019 |
53 | |
54 | /* Altera SerDes Enable 64-bit DMA address translation */ |
55 | #define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0) |
56 | |
57 | /* SmartFusion2 SerDes LSB address translation mask */ |
58 | #define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12) |
59 | |
60 | /* Xilinx SerDes LSB address translation mask */ |
61 | #define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12) |
62 | |
63 | /* Kvaser KCAN CAN controller registers */ |
64 | #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 |
65 | #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 |
66 | #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 |
67 | #define KVASER_PCIEFD_KCAN_CMD_REG 0x400 |
68 | #define KVASER_PCIEFD_KCAN_IEN_REG 0x408 |
69 | #define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 |
70 | #define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414 |
71 | #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 |
72 | #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c |
73 | #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 |
74 | #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 |
75 | #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 |
76 | #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 |
77 | /* System identification and information registers */ |
78 | #define KVASER_PCIEFD_SYSID_VERSION_REG 0x8 |
79 | #define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc |
80 | #define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10 |
81 | #define KVASER_PCIEFD_SYSID_BUILD_REG 0x14 |
82 | /* Shared receive buffer FIFO registers */ |
83 | #define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4 |
84 | /* Shared receive buffer registers */ |
85 | #define KVASER_PCIEFD_SRB_CMD_REG 0x0 |
86 | #define KVASER_PCIEFD_SRB_IEN_REG 0x04 |
87 | #define KVASER_PCIEFD_SRB_IRQ_REG 0x0c |
88 | #define KVASER_PCIEFD_SRB_STAT_REG 0x10 |
89 | #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14 |
90 | #define KVASER_PCIEFD_SRB_CTRL_REG 0x18 |
91 | |
92 | /* System build information fields */ |
93 | #define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24) |
94 | #define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16) |
95 | #define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0) |
96 | #define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1) |
97 | |
98 | /* Reset DMA buffer 0, 1 and FIFO offset */ |
99 | #define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) |
100 | #define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) |
101 | #define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) |
102 | |
103 | /* DMA underflow, buffer 0 and 1 */ |
104 | #define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) |
105 | #define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) |
106 | /* DMA overflow, buffer 0 and 1 */ |
107 | #define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) |
108 | #define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) |
109 | /* DMA packet done, buffer 0 and 1 */ |
110 | #define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) |
111 | #define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) |
112 | |
113 | /* Got DMA support */ |
114 | #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) |
115 | /* DMA idle */ |
116 | #define KVASER_PCIEFD_SRB_STAT_DI BIT(15) |
117 | |
118 | /* SRB current packet level */ |
119 | #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0) |
120 | |
121 | /* DMA Enable */ |
122 | #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) |
123 | |
124 | /* KCAN CTRL packet types */ |
125 | #define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29) |
126 | #define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4 |
127 | #define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5 |
128 | |
129 | /* Command sequence number */ |
130 | #define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16) |
131 | /* Command bits */ |
132 | #define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0) |
133 | /* Abort, flush and reset */ |
134 | #define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) |
135 | /* Request status packet */ |
136 | #define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) |
137 | |
138 | /* Transmitter unaligned */ |
139 | #define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) |
140 | /* Tx FIFO empty */ |
141 | #define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) |
142 | /* Tx FIFO overflow */ |
143 | #define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) |
144 | /* Tx buffer flush done */ |
145 | #define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) |
146 | /* Abort done */ |
147 | #define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) |
148 | /* Rx FIFO overflow */ |
149 | #define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) |
150 | /* FDF bit when controller is in classic CAN mode */ |
151 | #define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) |
152 | /* Bus parameter protection error */ |
153 | #define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) |
154 | /* Tx FIFO unaligned end */ |
155 | #define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) |
156 | /* Tx FIFO unaligned read */ |
157 | #define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) |
158 | |
159 | /* Tx FIFO size */ |
160 | #define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16) |
161 | /* Tx FIFO current packet level */ |
162 | #define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0) |
163 | |
164 | /* Current status packet sequence number */ |
165 | #define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24) |
166 | /* Controller got CAN FD capability */ |
167 | #define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) |
168 | /* Controller got one-shot capability */ |
169 | #define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) |
170 | /* Controller in reset mode */ |
171 | #define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) |
172 | /* Reset mode request */ |
173 | #define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) |
174 | /* Bus off */ |
175 | #define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) |
176 | /* Idle state. Controller in reset mode and no abort or flush pending */ |
177 | #define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) |
178 | /* Abort request */ |
179 | #define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) |
180 | /* Controller is bus off */ |
181 | #define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \ |
182 | (KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \ |
183 | KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM) |
184 | |
185 | /* Classic CAN mode */ |
186 | #define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) |
187 | /* Active error flag enable. Clear to force error passive */ |
188 | #define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) |
189 | /* Acknowledgment packet type */ |
190 | #define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) |
191 | /* CAN FD non-ISO */ |
192 | #define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) |
193 | /* Error packet enable */ |
194 | #define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) |
195 | /* Listen only mode */ |
196 | #define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) |
197 | /* Reset mode */ |
198 | #define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) |
199 | |
200 | /* BTRN and BTRD fields */ |
201 | #define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26) |
202 | #define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17) |
203 | #define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13) |
204 | #define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0) |
205 | |
206 | /* PWM Control fields */ |
207 | #define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16) |
208 | #define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0) |
209 | |
210 | /* KCAN packet type IDs */ |
211 | #define KVASER_PCIEFD_PACK_TYPE_DATA 0x0 |
212 | #define KVASER_PCIEFD_PACK_TYPE_ACK 0x1 |
213 | #define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2 |
214 | #define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3 |
215 | #define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4 |
216 | #define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5 |
217 | #define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6 |
218 | #define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8 |
219 | #define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9 |
220 | |
221 | /* Common KCAN packet definitions, second word */ |
222 | #define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28) |
223 | #define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25) |
224 | #define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0) |
225 | |
226 | /* KCAN Transmit/Receive data packet, first word */ |
227 | #define KVASER_PCIEFD_RPACKET_IDE BIT(30) |
228 | #define KVASER_PCIEFD_RPACKET_RTR BIT(29) |
229 | #define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0) |
230 | /* KCAN Transmit data packet, second word */ |
231 | #define KVASER_PCIEFD_TPACKET_AREQ BIT(31) |
232 | #define KVASER_PCIEFD_TPACKET_SMS BIT(16) |
233 | /* KCAN Transmit/Receive data packet, second word */ |
234 | #define KVASER_PCIEFD_RPACKET_FDF BIT(15) |
235 | #define KVASER_PCIEFD_RPACKET_BRS BIT(14) |
236 | #define KVASER_PCIEFD_RPACKET_ESI BIT(13) |
237 | #define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8) |
238 | |
239 | /* KCAN Transmit acknowledge packet, first word */ |
240 | #define KVASER_PCIEFD_APACKET_NACK BIT(11) |
241 | #define KVASER_PCIEFD_APACKET_ABL BIT(10) |
242 | #define KVASER_PCIEFD_APACKET_CT BIT(9) |
243 | #define KVASER_PCIEFD_APACKET_FLU BIT(8) |
244 | |
245 | /* KCAN Status packet, first word */ |
246 | #define KVASER_PCIEFD_SPACK_RMCD BIT(22) |
247 | #define KVASER_PCIEFD_SPACK_IRM BIT(21) |
248 | #define KVASER_PCIEFD_SPACK_IDET BIT(20) |
249 | #define KVASER_PCIEFD_SPACK_BOFF BIT(16) |
250 | #define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8) |
251 | #define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0) |
252 | /* KCAN Status packet, second word */ |
253 | #define KVASER_PCIEFD_SPACK_EPLR BIT(24) |
254 | #define KVASER_PCIEFD_SPACK_EWLR BIT(23) |
255 | #define KVASER_PCIEFD_SPACK_AUTO BIT(21) |
256 | |
257 | /* KCAN Error detected packet, second word */ |
258 | #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) |
259 | |
260 | /* Macros for calculating addresses of registers */ |
261 | #define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ |
262 | ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) |
263 | #define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ |
264 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) |
265 | #define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ |
266 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) |
267 | #define KVASER_PCIEFD_SERDES_ADDR(pcie) \ |
268 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) |
269 | #define KVASER_PCIEFD_SYSID_ADDR(pcie) \ |
270 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) |
271 | #define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ |
272 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) |
273 | #define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ |
274 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) |
275 | #define KVASER_PCIEFD_SRB_ADDR(pcie) \ |
276 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) |
277 | #define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ |
278 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) |
279 | #define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ |
280 | (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) |
281 | #define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ |
282 | (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) |
283 | #define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ |
284 | (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) |
285 | |
286 | struct kvaser_pciefd; |
287 | static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, |
288 | dma_addr_t addr, int index); |
289 | static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, |
290 | dma_addr_t addr, int index); |
291 | static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, |
292 | dma_addr_t addr, int index); |
293 | |
294 | struct kvaser_pciefd_address_offset { |
295 | u32 serdes; |
296 | u32 pci_ien; |
297 | u32 pci_irq; |
298 | u32 sysid; |
299 | u32 loopback; |
300 | u32 kcan_srb_fifo; |
301 | u32 kcan_srb; |
302 | u32 kcan_ch0; |
303 | u32 kcan_ch1; |
304 | }; |
305 | |
306 | struct kvaser_pciefd_dev_ops { |
307 | void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, |
308 | dma_addr_t addr, int index); |
309 | }; |
310 | |
311 | struct kvaser_pciefd_irq_mask { |
312 | u32 kcan_rx0; |
313 | u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; |
314 | u32 all; |
315 | }; |
316 | |
317 | struct kvaser_pciefd_driver_data { |
318 | const struct kvaser_pciefd_address_offset *address_offset; |
319 | const struct kvaser_pciefd_irq_mask *irq_mask; |
320 | const struct kvaser_pciefd_dev_ops *ops; |
321 | }; |
322 | |
323 | static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { |
324 | .serdes = 0x1000, |
325 | .pci_ien = 0x50, |
326 | .pci_irq = 0x40, |
327 | .sysid = 0x1f020, |
328 | .loopback = 0x1f000, |
329 | .kcan_srb_fifo = 0x1f200, |
330 | .kcan_srb = 0x1f400, |
331 | .kcan_ch0 = 0x10000, |
332 | .kcan_ch1 = 0x11000, |
333 | }; |
334 | |
335 | static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { |
336 | .serdes = 0x280c8, |
337 | .pci_ien = 0x102004, |
338 | .pci_irq = 0x102008, |
339 | .sysid = 0x100000, |
340 | .loopback = 0x103000, |
341 | .kcan_srb_fifo = 0x120000, |
342 | .kcan_srb = 0x121000, |
343 | .kcan_ch0 = 0x140000, |
344 | .kcan_ch1 = 0x142000, |
345 | }; |
346 | |
347 | static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = { |
348 | .serdes = 0x00208, |
349 | .pci_ien = 0x102004, |
350 | .pci_irq = 0x102008, |
351 | .sysid = 0x100000, |
352 | .loopback = 0x103000, |
353 | .kcan_srb_fifo = 0x120000, |
354 | .kcan_srb = 0x121000, |
355 | .kcan_ch0 = 0x140000, |
356 | .kcan_ch1 = 0x142000, |
357 | }; |
358 | |
359 | static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { |
360 | .kcan_rx0 = BIT(4), |
361 | .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, |
362 | .all = GENMASK(4, 0), |
363 | }; |
364 | |
365 | static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { |
366 | .kcan_rx0 = BIT(4), |
367 | .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, |
368 | .all = GENMASK(19, 16) | BIT(4), |
369 | }; |
370 | |
371 | static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = { |
372 | .kcan_rx0 = BIT(4), |
373 | .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) }, |
374 | .all = GENMASK(23, 16) | BIT(4), |
375 | }; |
376 | |
377 | static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { |
378 | .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, |
379 | }; |
380 | |
381 | static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = { |
382 | .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, |
383 | }; |
384 | |
385 | static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = { |
386 | .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx, |
387 | }; |
388 | |
389 | static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { |
390 | .address_offset = &kvaser_pciefd_altera_address_offset, |
391 | .irq_mask = &kvaser_pciefd_altera_irq_mask, |
392 | .ops = &kvaser_pciefd_altera_dev_ops, |
393 | }; |
394 | |
395 | static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { |
396 | .address_offset = &kvaser_pciefd_sf2_address_offset, |
397 | .irq_mask = &kvaser_pciefd_sf2_irq_mask, |
398 | .ops = &kvaser_pciefd_sf2_dev_ops, |
399 | }; |
400 | |
401 | static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = { |
402 | .address_offset = &kvaser_pciefd_xilinx_address_offset, |
403 | .irq_mask = &kvaser_pciefd_xilinx_irq_mask, |
404 | .ops = &kvaser_pciefd_xilinx_dev_ops, |
405 | }; |
406 | |
407 | struct kvaser_pciefd_can { |
408 | struct can_priv can; |
409 | struct kvaser_pciefd *kv_pcie; |
410 | void __iomem *reg_base; |
411 | struct can_berr_counter bec; |
412 | u8 cmd_seq; |
413 | int err_rep_cnt; |
414 | int echo_idx; |
415 | spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ |
416 | spinlock_t echo_lock; /* Locks the message echo buffer */ |
417 | struct timer_list bec_poll_timer; |
418 | struct completion start_comp, flush_comp; |
419 | }; |
420 | |
421 | struct kvaser_pciefd { |
422 | struct pci_dev *pci; |
423 | void __iomem *reg_base; |
424 | struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; |
425 | const struct kvaser_pciefd_driver_data *driver_data; |
426 | void *dma_data[KVASER_PCIEFD_DMA_COUNT]; |
427 | u8 nr_channels; |
428 | u32 bus_freq; |
429 | u32 freq; |
430 | u32 freq_to_ticks_div; |
431 | }; |
432 | |
433 | struct kvaser_pciefd_rx_packet { |
	u32 header[2];
435 | u64 timestamp; |
436 | }; |
437 | |
438 | struct kvaser_pciefd_tx_packet { |
	u32 header[2];
440 | u8 data[64]; |
441 | }; |
442 | |
443 | static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { |
444 | .name = KVASER_PCIEFD_DRV_NAME, |
445 | .tseg1_min = 1, |
446 | .tseg1_max = 512, |
447 | .tseg2_min = 1, |
448 | .tseg2_max = 32, |
449 | .sjw_max = 16, |
450 | .brp_min = 1, |
451 | .brp_max = 8192, |
452 | .brp_inc = 1, |
453 | }; |
454 | |
455 | static struct pci_device_id kvaser_pciefd_id_table[] = { |
456 | { |
457 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), |
458 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, |
459 | }, |
460 | { |
461 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), |
462 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, |
463 | }, |
464 | { |
465 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), |
466 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, |
467 | }, |
468 | { |
469 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID), |
470 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, |
471 | }, |
472 | { |
473 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID), |
474 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, |
475 | }, |
476 | { |
477 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID), |
478 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, |
479 | }, |
480 | { |
481 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID), |
482 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, |
483 | }, |
484 | { |
485 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID), |
486 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, |
487 | }, |
488 | { |
489 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID), |
490 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, |
491 | }, |
492 | { |
493 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID), |
494 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, |
495 | }, |
496 | { |
497 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID), |
498 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data, |
499 | }, |
500 | { |
501 | PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID), |
502 | .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data, |
503 | }, |
504 | { |
505 | 0, |
506 | }, |
507 | }; |
508 | MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); |
509 | |
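/* Write a command to the KCAN command register, tagged with the next
 * sequence number so the matching status packet can be identified.
 */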
510 | static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd) |
511 | { |
512 | iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) | |
513 | FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq), |
514 | can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); |
515 | } |
516 | |
517 | static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) |
518 | { |
519 | kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ); |
520 | } |
521 | |
522 | static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can) |
523 | { |
524 | kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT); |
525 | } |
526 | |
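/* Enable error packet generation (EPEN), so bus errors are reported
 * as error packets on the receive path.
 */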
527 | static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) |
528 | { |
529 | u32 mode; |
530 | unsigned long irq; |
531 | |
532 | spin_lock_irqsave(&can->lock, irq); |
533 | mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
534 | if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { |
535 | mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; |
536 | iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
537 | } |
	spin_unlock_irqrestore(&can->lock, irq);
539 | } |
540 | |
541 | static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) |
542 | { |
543 | u32 mode; |
544 | unsigned long irq; |
545 | |
546 | spin_lock_irqsave(&can->lock, irq); |
547 | mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
548 | mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; |
549 | iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
	spin_unlock_irqrestore(&can->lock, irq);
551 | } |
552 | |
553 | static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) |
554 | { |
555 | u32 msk; |
556 | |
557 | msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | |
558 | KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | |
559 | KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | |
560 | KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | |
561 | KVASER_PCIEFD_KCAN_IRQ_TAR; |
562 | |
563 | iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); |
564 | } |
565 | |
566 | static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie, |
567 | struct sk_buff *skb, u64 timestamp) |
568 | { |
569 | skb_hwtstamps(skb)->hwtstamp = |
		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
571 | } |
572 | |
573 | static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) |
574 | { |
575 | u32 mode; |
576 | unsigned long irq; |
577 | |
578 | spin_lock_irqsave(&can->lock, irq); |
579 | mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
580 | if (can->can.ctrlmode & CAN_CTRLMODE_FD) { |
581 | mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; |
582 | if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) |
583 | mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; |
584 | else |
585 | mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; |
586 | } else { |
587 | mode |= KVASER_PCIEFD_KCAN_MODE_CCM; |
588 | mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; |
589 | } |
590 | |
591 | if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) |
592 | mode |= KVASER_PCIEFD_KCAN_MODE_LOM; |
593 | else |
594 | mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; |
595 | mode |= KVASER_PCIEFD_KCAN_MODE_EEN; |
596 | mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; |
597 | /* Use ACK packet type */ |
598 | mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; |
599 | mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; |
600 | iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
601 | |
	spin_unlock_irqrestore(&can->lock, irq);
603 | } |
604 | |
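/* Arm the abort-done interrupt and begin an abort/flush/reset cycle;
 * flush_comp is completed once the end-of-flush packet is received.
 */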
605 | static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) |
606 | { |
607 | u32 status; |
608 | unsigned long irq; |
609 | |
610 | spin_lock_irqsave(&can->lock, irq); |
611 | iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); |
612 | iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, |
613 | can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); |
614 | status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); |
615 | if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { |
616 | /* If controller is already idle, run abort, flush and reset */ |
617 | kvaser_pciefd_abort_flush_reset(can); |
618 | } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { |
619 | u32 mode; |
620 | |
621 | /* Put controller in reset mode */ |
622 | mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
623 | mode |= KVASER_PCIEFD_KCAN_MODE_RM; |
624 | iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
625 | } |
	spin_unlock_irqrestore(&can->lock, irq);
627 | } |
628 | |
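/* Bring the controller to bus on: flush pending transmissions, release
 * reset mode and wait for the controller to report bus on, then restore
 * interrupts, mode bits and error counters.
 */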
629 | static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) |
630 | { |
631 | u32 mode; |
632 | unsigned long irq; |
633 | |
	del_timer(&can->bec_poll_timer);
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
641 | return -ETIMEDOUT; |
642 | } |
643 | |
644 | spin_lock_irqsave(&can->lock, irq); |
645 | iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); |
646 | iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); |
647 | iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, |
648 | can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); |
649 | mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
650 | mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; |
651 | iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
657 | return -ETIMEDOUT; |
658 | } |
659 | /* Reset interrupt handling */ |
660 | iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); |
661 | iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); |
662 | |
663 | kvaser_pciefd_set_tx_irq(can); |
664 | kvaser_pciefd_setup_controller(can); |
665 | can->can.state = CAN_STATE_ERROR_ACTIVE; |
	netif_wake_queue(can->can.dev);
667 | can->bec.txerr = 0; |
668 | can->bec.rxerr = 0; |
669 | can->err_rep_cnt = 0; |
670 | |
671 | return 0; |
672 | } |
673 | |
674 | static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) |
675 | { |
676 | u8 top; |
677 | u32 pwm_ctrl; |
678 | unsigned long irq; |
679 | |
680 | spin_lock_irqsave(&can->lock, irq); |
681 | pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); |
682 | top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl); |
683 | /* Set duty cycle to zero */ |
684 | pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); |
685 | iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); |
	spin_unlock_irqrestore(&can->lock, irq);
687 | } |
688 | |
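/* Start the PWM output: program the counter top value for a 500 kHz
 * period derived from the bus frequency, then raise the trigger level
 * to reach roughly a 95% duty cycle.
 */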
689 | static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) |
690 | { |
691 | int top, trigger; |
692 | u32 pwm_ctrl; |
693 | unsigned long irq; |
694 | |
695 | kvaser_pciefd_pwm_stop(can); |
696 | spin_lock_irqsave(&can->lock, irq); |
697 | /* Set frequency to 500 KHz */ |
698 | top = can->kv_pcie->bus_freq / (2 * 500000) - 1; |
699 | |
700 | pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); |
701 | pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); |
702 | iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); |
703 | |
704 | /* Set duty cycle to 95 */ |
705 | trigger = (100 * top - 95 * (top + 1) + 50) / 100; |
706 | pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger); |
707 | pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); |
708 | iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); |
	spin_unlock_irqrestore(&can->lock, irq);
710 | } |
711 | |
712 | static int kvaser_pciefd_open(struct net_device *netdev) |
713 | { |
714 | int err; |
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
718 | if (err) |
719 | return err; |
720 | |
721 | err = kvaser_pciefd_bus_on(can); |
722 | if (err) { |
		close_candev(netdev);
724 | return err; |
725 | } |
726 | |
727 | return 0; |
728 | } |
729 | |
730 | static int kvaser_pciefd_stop(struct net_device *netdev) |
731 | { |
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	can->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);
749 | |
750 | return ret; |
751 | } |
752 | |
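/* Translate an outgoing skb into a KCAN transmit packet: two header words
 * carrying ID, flags, DLC and sequence number, followed by the payload.
 * Returns the payload length in 32-bit words.
 */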
753 | static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, |
754 | struct kvaser_pciefd_can *can, |
755 | struct sk_buff *skb) |
756 | { |
757 | struct canfd_frame *cf = (struct canfd_frame *)skb->data; |
758 | int packet_size; |
759 | int seq = can->echo_idx; |
760 | |
761 | memset(p, 0, sizeof(*p)); |
762 | if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) |
763 | p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; |
764 | |
765 | if (cf->can_id & CAN_RTR_FLAG) |
766 | p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; |
767 | |
768 | if (cf->can_id & CAN_EFF_FLAG) |
769 | p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; |
770 | |
771 | p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id); |
772 | p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; |
773 | |
774 | if (can_is_canfd_skb(skb)) { |
775 | p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, |
776 | can_fd_len2dlc(cf->len)); |
777 | p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; |
778 | if (cf->flags & CANFD_BRS) |
779 | p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; |
780 | if (cf->flags & CANFD_ESI) |
781 | p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; |
782 | } else { |
783 | p->header[1] |= |
784 | FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, |
785 | can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode)); |
786 | } |
787 | |
788 | p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); |
789 | |
790 | packet_size = cf->len; |
791 | memcpy(p->data, cf->data, packet_size); |
792 | |
793 | return DIV_ROUND_UP(packet_size, 4); |
794 | } |
795 | |
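/* Queue a frame for transmission: reserve an echo slot, write the header
 * words and payload to the transmit FIFO register, and complete the packet
 * via the FIFO_LAST register. The queue is stopped when no free slots or
 * FIFO entries remain.
 */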
796 | static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, |
797 | struct net_device *netdev) |
798 | { |
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
800 | unsigned long irq_flags; |
801 | struct kvaser_pciefd_tx_packet packet; |
802 | int nr_words; |
803 | u8 count; |
804 | |
	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);
	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);
813 | |
814 | /* Move echo index to the next slot */ |
815 | can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; |
816 | |
817 | /* Write header to fifo */ |
818 | iowrite32(packet.header[0], |
819 | can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); |
820 | iowrite32(packet.header[1], |
821 | can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); |
822 | |
823 | if (nr_words) { |
824 | u32 data_last = ((u32 *)packet.data)[nr_words - 1]; |
825 | |
826 | /* Write data to fifo, except last word */ |
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nr_words - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
837 | } |
838 | |
839 | count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, |
840 | ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); |
841 | /* No room for a new message, stop the queue until at least one |
842 | * successful transmit |
843 | */ |
844 | if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx]) |
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&can->echo_lock, irq_flags);
847 | |
848 | return NETDEV_TX_OK; |
849 | } |
850 | |
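/* Write nominal or data phase bittiming. The BTRN/BTRD registers can only
 * be written while the controller is in reset mode, so reset mode is
 * requested and polled for, and the previous mode is restored afterwards.
 */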
851 | static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) |
852 | { |
853 | u32 mode, test, btrn; |
854 | unsigned long irq_flags; |
855 | int ret; |
856 | struct can_bittiming *bt; |
857 | |
858 | if (data) |
859 | bt = &can->can.data_bittiming; |
860 | else |
861 | bt = &can->can.bittiming; |
862 | |
863 | btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) | |
864 | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | |
865 | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) | |
866 | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1); |
867 | |
868 | spin_lock_irqsave(&can->lock, irq_flags); |
869 | mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
870 | /* Put the circuit in reset mode */ |
871 | iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, |
872 | can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
873 | |
874 | /* Can only set bittiming if in reset mode */ |
875 | ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, |
876 | test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10); |
877 | if (ret) { |
		spin_unlock_irqrestore(&can->lock, irq_flags);
879 | return -EBUSY; |
880 | } |
881 | |
882 | if (data) |
883 | iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); |
884 | else |
885 | iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); |
886 | /* Restore previous reset mode status */ |
887 | iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); |
	spin_unlock_irqrestore(&can->lock, irq_flags);
889 | |
890 | return 0; |
891 | } |
892 | |
893 | static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) |
894 | { |
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
896 | } |
897 | |
898 | static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) |
899 | { |
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
901 | } |
902 | |
903 | static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) |
904 | { |
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
906 | int ret = 0; |
907 | |
908 | switch (mode) { |
909 | case CAN_MODE_START: |
910 | if (!can->can.restart_ms) |
911 | ret = kvaser_pciefd_bus_on(can); |
912 | break; |
913 | default: |
914 | return -EOPNOTSUPP; |
915 | } |
916 | |
917 | return ret; |
918 | } |
919 | |
920 | static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, |
921 | struct can_berr_counter *bec) |
922 | { |
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
924 | |
925 | bec->rxerr = can->bec.rxerr; |
926 | bec->txerr = can->bec.txerr; |
927 | |
928 | return 0; |
929 | } |
930 | |
931 | static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) |
932 | { |
933 | struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); |
934 | |
935 | kvaser_pciefd_enable_err_gen(can); |
936 | kvaser_pciefd_request_status(can); |
937 | can->err_rep_cnt = 0; |
938 | } |
939 | |
940 | static const struct net_device_ops kvaser_pciefd_netdev_ops = { |
941 | .ndo_open = kvaser_pciefd_open, |
942 | .ndo_stop = kvaser_pciefd_stop, |
943 | .ndo_eth_ioctl = can_eth_ioctl_hwts, |
944 | .ndo_start_xmit = kvaser_pciefd_start_xmit, |
945 | .ndo_change_mtu = can_change_mtu, |
946 | }; |
947 | |
948 | static const struct ethtool_ops kvaser_pciefd_ethtool_ops = { |
949 | .get_ts_info = can_ethtool_op_get_ts_info_hwts, |
950 | }; |
951 | |
952 | static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) |
953 | { |
954 | int i; |
955 | |
956 | for (i = 0; i < pcie->nr_channels; i++) { |
957 | struct net_device *netdev; |
958 | struct kvaser_pciefd_can *can; |
959 | u32 status, tx_nr_packets_max; |
960 | |
961 | netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), |
962 | KVASER_PCIEFD_CAN_TX_MAX_COUNT); |
963 | if (!netdev) |
964 | return -ENOMEM; |
965 | |
		can = netdev_priv(netdev);
967 | netdev->netdev_ops = &kvaser_pciefd_netdev_ops; |
968 | netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; |
969 | can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i); |
970 | can->kv_pcie = pcie; |
971 | can->cmd_seq = 0; |
972 | can->err_rep_cnt = 0; |
973 | can->bec.txerr = 0; |
974 | can->bec.rxerr = 0; |
975 | |
		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
978 | timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); |
979 | |
980 | /* Disable Bus load reporting */ |
981 | iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); |
982 | |
983 | tx_nr_packets_max = |
984 | FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, |
985 | ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); |
986 | |
987 | can->can.clock.freq = pcie->freq; |
988 | can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); |
989 | can->echo_idx = 0; |
990 | spin_lock_init(&can->echo_lock); |
991 | spin_lock_init(&can->lock); |
992 | |
993 | can->can.bittiming_const = &kvaser_pciefd_bittiming_const; |
994 | can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; |
995 | can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; |
996 | can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming; |
997 | can->can.do_set_mode = kvaser_pciefd_set_mode; |
998 | can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; |
999 | can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | |
1000 | CAN_CTRLMODE_FD | |
1001 | CAN_CTRLMODE_FD_NON_ISO | |
1002 | CAN_CTRLMODE_CC_LEN8_DLC; |
1003 | |
1004 | status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); |
1005 | if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { |
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
1010 | return -ENODEV; |
1011 | } |
1012 | |
1013 | if (status & KVASER_PCIEFD_KCAN_STAT_CAP) |
1014 | can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; |
1015 | |
1016 | netdev->flags |= IFF_ECHO; |
1017 | SET_NETDEV_DEV(netdev, &pcie->pci->dev); |
1018 | |
1019 | iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); |
1020 | iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, |
1021 | can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); |
1022 | |
1023 | pcie->can[i] = can; |
1024 | kvaser_pciefd_pwm_start(can); |
1025 | } |
1026 | |
1027 | return 0; |
1028 | } |
1029 | |
1030 | static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) |
1031 | { |
1032 | int i; |
1033 | |
1034 | for (i = 0; i < pcie->nr_channels; i++) { |
		int err = register_candev(pcie->can[i]->can.dev);
1036 | |
1037 | if (err) { |
1038 | int j; |
1039 | |
1040 | /* Unregister all successfully registered devices. */ |
1041 | for (j = 0; j < i; j++) |
				unregister_candev(pcie->can[j]->can.dev);
1043 | return err; |
1044 | } |
1045 | } |
1046 | |
1047 | return 0; |
1048 | } |
1049 | |
1050 | static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, |
1051 | dma_addr_t addr, int index) |
1052 | { |
1053 | void __iomem *serdes_base; |
1054 | u32 word1, word2; |
1055 | |
1056 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1057 | word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT; |
1058 | word2 = addr >> 32; |
1059 | #else |
1060 | word1 = addr; |
1061 | word2 = 0; |
1062 | #endif |
1063 | serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; |
1064 | iowrite32(word1, serdes_base); |
1065 | iowrite32(word2, serdes_base + 0x4); |
1066 | } |
1067 | |
1068 | static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, |
1069 | dma_addr_t addr, int index) |
1070 | { |
1071 | void __iomem *serdes_base; |
1072 | u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; |
1073 | u32 msb = 0x0; |
1074 | |
1075 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1076 | msb = addr >> 32; |
1077 | #endif |
1078 | serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; |
1079 | iowrite32(lsb, serdes_base); |
1080 | iowrite32(msb, serdes_base + 0x4); |
1081 | } |
1082 | |
1083 | static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, |
1084 | dma_addr_t addr, int index) |
1085 | { |
1086 | void __iomem *serdes_base; |
1087 | u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK; |
1088 | u32 msb = 0x0; |
1089 | |
1090 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1091 | msb = addr >> 32; |
1092 | #endif |
1093 | serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; |
1094 | iowrite32(msb, serdes_base); |
1095 | iowrite32(lsb, serdes_base + 0x4); |
1096 | } |
1097 | |
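/* Allocate the two receive DMA buffers, program their bus addresses into
 * the SerDes address translation registers through the device specific
 * write_dma_map op, drain the Rx FIFO and finally enable DMA.
 */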
1098 | static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) |
1099 | { |
1100 | int i; |
1101 | u32 srb_status; |
1102 | u32 srb_packet_count; |
1103 | dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; |
1104 | |
1105 | /* Disable the DMA */ |
1106 | iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); |
1107 | for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { |
		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_addr[i],
							GFP_KERNEL);
1112 | |
1113 | if (!pcie->dma_data[i] || !dma_addr[i]) { |
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
1115 | KVASER_PCIEFD_DMA_SIZE); |
1116 | return -ENOMEM; |
1117 | } |
1118 | pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i); |
1119 | } |
1120 | |
1121 | /* Reset Rx FIFO, and both DMA buffers */ |
1122 | iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | |
1123 | KVASER_PCIEFD_SRB_CMD_RDB1, |
1124 | KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); |
1125 | /* Empty Rx FIFO */ |
1126 | srb_packet_count = |
1127 | FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK, |
1128 | ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + |
1129 | KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); |
1130 | while (srb_packet_count) { |
1131 | /* Drop current packet in FIFO */ |
1132 | ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG); |
1133 | srb_packet_count--; |
1134 | } |
1135 | |
1136 | srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); |
1137 | if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { |
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
1139 | return -EIO; |
1140 | } |
1141 | |
1142 | /* Enable the DMA */ |
1143 | iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, |
1144 | KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); |
1145 | |
1146 | return 0; |
1147 | } |
1148 | |
1149 | static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) |
1150 | { |
1151 | u32 version, srb_status, build; |
1152 | |
1153 | version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG); |
1154 | pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS, |
1155 | FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version)); |
1156 | |
1157 | build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG); |
	dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
1159 | FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version), |
1160 | FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version), |
1161 | FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build)); |
1162 | |
1163 | srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); |
1164 | if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { |
		dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
1166 | return -ENODEV; |
1167 | } |
1168 | |
1169 | pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG); |
1170 | pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG); |
1171 | pcie->freq_to_ticks_div = pcie->freq / 1000000; |
1172 | if (pcie->freq_to_ticks_div == 0) |
1173 | pcie->freq_to_ticks_div = 1; |
1174 | /* Turn off all loopback functionality */ |
1175 | iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie)); |
1176 | |
1177 | return 0; |
1178 | } |
1179 | |
1180 | static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, |
1181 | struct kvaser_pciefd_rx_packet *p, |
1182 | __le32 *data) |
1183 | { |
1184 | struct sk_buff *skb; |
1185 | struct canfd_frame *cf; |
1186 | struct can_priv *priv; |
1187 | u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); |
1188 | u8 dlc; |
1189 | |
1190 | if (ch_id >= pcie->nr_channels) |
1191 | return -EIO; |
1192 | |
1193 | priv = &pcie->can[ch_id]->can; |
1194 | dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]); |
1195 | |
1196 | if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { |
		skb = alloc_canfd_skb(priv->dev, &cf);
1198 | if (!skb) { |
1199 | priv->dev->stats.rx_dropped++; |
1200 | return -ENOMEM; |
1201 | } |
1202 | |
1203 | cf->len = can_fd_dlc2len(dlc); |
1204 | if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) |
1205 | cf->flags |= CANFD_BRS; |
1206 | if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) |
1207 | cf->flags |= CANFD_ESI; |
1208 | } else { |
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
1215 | } |
1216 | |
1217 | cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]); |
1218 | if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) |
1219 | cf->can_id |= CAN_EFF_FLAG; |
1220 | |
1221 | if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) { |
1222 | cf->can_id |= CAN_RTR_FLAG; |
1223 | } else { |
1224 | memcpy(cf->data, data, cf->len); |
1225 | priv->dev->stats.rx_bytes += cf->len; |
1226 | } |
1227 | priv->dev->stats.rx_packets++; |
	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
1229 | |
1230 | return netif_rx(skb); |
1231 | } |
1232 | |
1233 | static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, |
1234 | struct can_frame *cf, |
1235 | enum can_state new_state, |
1236 | enum can_state tx_state, |
1237 | enum can_state rx_state) |
1238 | { |
	can_change_state(can->can.dev, cf, tx_state, rx_state);
1240 | |
1241 | if (new_state == CAN_STATE_BUS_OFF) { |
1242 | struct net_device *ndev = can->can.dev; |
1243 | unsigned long irq_flags; |
1244 | |
1245 | spin_lock_irqsave(&can->lock, irq_flags); |
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);
1248 | /* Prevent CAN controller from auto recover from bus off */ |
1249 | if (!can->can.restart_ms) { |
1250 | kvaser_pciefd_start_controller_flush(can); |
			can_bus_off(ndev);
1252 | } |
1253 | } |
1254 | } |
1255 | |
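/* Derive the CAN state from a status packet: the bus off/reset flags and
 * the error counters (96/128/255 thresholds) select the new state, and the
 * larger counter decides whether the tx or rx side carries that state.
 */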
1256 | static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, |
1257 | struct can_berr_counter *bec, |
1258 | enum can_state *new_state, |
1259 | enum can_state *tx_state, |
1260 | enum can_state *rx_state) |
1261 | { |
1262 | if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || |
1263 | p->header[0] & KVASER_PCIEFD_SPACK_IRM) |
1264 | *new_state = CAN_STATE_BUS_OFF; |
1265 | else if (bec->txerr >= 255 || bec->rxerr >= 255) |
1266 | *new_state = CAN_STATE_BUS_OFF; |
1267 | else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) |
1268 | *new_state = CAN_STATE_ERROR_PASSIVE; |
1269 | else if (bec->txerr >= 128 || bec->rxerr >= 128) |
1270 | *new_state = CAN_STATE_ERROR_PASSIVE; |
1271 | else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) |
1272 | *new_state = CAN_STATE_ERROR_WARNING; |
1273 | else if (bec->txerr >= 96 || bec->rxerr >= 96) |
1274 | *new_state = CAN_STATE_ERROR_WARNING; |
1275 | else |
1276 | *new_state = CAN_STATE_ERROR_ACTIVE; |
1277 | |
1278 | *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; |
1279 | *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0; |
1280 | } |
1281 | |
1282 | static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, |
1283 | struct kvaser_pciefd_rx_packet *p) |
1284 | { |
1285 | struct can_berr_counter bec; |
1286 | enum can_state old_state, new_state, tx_state, rx_state; |
1287 | struct net_device *ndev = can->can.dev; |
1288 | struct sk_buff *skb; |
1289 | struct can_frame *cf = NULL; |
1290 | |
1291 | old_state = can->can.state; |
1292 | |
1293 | bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); |
1294 | bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); |
1295 | |
	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	skb = alloc_can_err_skb(ndev, &cf);
1298 | if (new_state != old_state) { |
1299 | kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state); |
1300 | if (old_state == CAN_STATE_BUS_OFF && |
1301 | new_state == CAN_STATE_ERROR_ACTIVE && |
1302 | can->can.restart_ms) { |
1303 | can->can.can_stats.restarts++; |
1304 | if (skb) |
1305 | cf->can_id |= CAN_ERR_RESTARTED; |
1306 | } |
1307 | } |
1308 | |
1309 | can->err_rep_cnt++; |
1310 | can->can.can_stats.bus_error++; |
1311 | if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) |
1312 | ndev->stats.tx_errors++; |
1313 | else |
1314 | ndev->stats.rx_errors++; |
1315 | |
1316 | can->bec.txerr = bec.txerr; |
1317 | can->bec.rxerr = bec.rxerr; |
1318 | |
1319 | if (!skb) { |
1320 | ndev->stats.rx_dropped++; |
1321 | return -ENOMEM; |
1322 | } |
1323 | |
	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
1325 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT; |
1326 | cf->data[6] = bec.txerr; |
1327 | cf->data[7] = bec.rxerr; |
1328 | |
1329 | netif_rx(skb); |
1330 | |
1331 | return 0; |
1332 | } |
1333 | |
1334 | static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, |
1335 | struct kvaser_pciefd_rx_packet *p) |
1336 | { |
1337 | struct kvaser_pciefd_can *can; |
1338 | u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); |
1339 | |
1340 | if (ch_id >= pcie->nr_channels) |
1341 | return -EIO; |
1342 | |
1343 | can = pcie->can[ch_id]; |
1344 | kvaser_pciefd_rx_error_frame(can, p); |
1345 | if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) |
1346 | /* Do not report more errors, until bec_poll_timer expires */ |
1347 | kvaser_pciefd_disable_err_gen(can); |
1348 | /* Start polling the error counters */ |
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1350 | |
1351 | return 0; |
1352 | } |
1353 | |
1354 | static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, |
1355 | struct kvaser_pciefd_rx_packet *p) |
1356 | { |
1357 | struct can_berr_counter bec; |
1358 | enum can_state old_state, new_state, tx_state, rx_state; |
1359 | |
1360 | old_state = can->can.state; |
1361 | |
1362 | bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); |
1363 | bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); |
1364 | |
	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
1366 | if (new_state != old_state) { |
1367 | struct net_device *ndev = can->can.dev; |
1368 | struct sk_buff *skb; |
1369 | struct can_frame *cf; |
1370 | |
		skb = alloc_can_err_skb(ndev, &cf);
1372 | if (!skb) { |
1373 | ndev->stats.rx_dropped++; |
1374 | return -ENOMEM; |
1375 | } |
1376 | |
1377 | kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state); |
1378 | if (old_state == CAN_STATE_BUS_OFF && |
1379 | new_state == CAN_STATE_ERROR_ACTIVE && |
1380 | can->can.restart_ms) { |
1381 | can->can.can_stats.restarts++; |
1382 | cf->can_id |= CAN_ERR_RESTARTED; |
1383 | } |
1384 | |
		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
1386 | |
1387 | cf->data[6] = bec.txerr; |
1388 | cf->data[7] = bec.rxerr; |
1389 | |
1390 | netif_rx(skb); |
1391 | } |
1392 | can->bec.txerr = bec.txerr; |
1393 | can->bec.rxerr = bec.rxerr; |
1394 | /* Check if we need to poll the error counters */ |
1395 | if (bec.txerr || bec.rxerr) |
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1397 | |
1398 | return 0; |
1399 | } |
1400 | |
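/* Dispatch a status packet: depending on the flag and sequence number
 * combination it continues the reset/flush handshake, requests an
 * end-of-flush packet, records a requested status response, or completes
 * the bus on sequence.
 */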
1401 | static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, |
1402 | struct kvaser_pciefd_rx_packet *p) |
1403 | { |
1404 | struct kvaser_pciefd_can *can; |
1405 | u8 cmdseq; |
1406 | u32 status; |
1407 | u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); |
1408 | |
1409 | if (ch_id >= pcie->nr_channels) |
1410 | return -EIO; |
1411 | |
1412 | can = pcie->can[ch_id]; |
1413 | |
1414 | status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); |
1415 | cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status); |
1416 | |
1417 | /* Reset done, start abort and flush */ |
1418 | if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && |
1419 | p->header[0] & KVASER_PCIEFD_SPACK_RMCD && |
1420 | p->header[1] & KVASER_PCIEFD_SPACK_AUTO && |
1421 | cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && |
1422 | status & KVASER_PCIEFD_KCAN_STAT_IDLE) { |
1423 | iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, |
1424 | can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); |
1425 | kvaser_pciefd_abort_flush_reset(can); |
1426 | } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && |
1427 | p->header[0] & KVASER_PCIEFD_SPACK_IRM && |
1428 | cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && |
1429 | status & KVASER_PCIEFD_KCAN_STAT_IDLE) { |
1430 | /* Reset detected, send end of flush if no packet are in FIFO */ |
1431 | u8 count; |
1432 | |
1433 | count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, |
1434 | ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); |
1435 | if (!count) |
1436 | iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK, |
1437 | KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH), |
1438 | can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); |
1439 | } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && |
1440 | cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) { |
1441 | /* Response to status request received */ |
1442 | kvaser_pciefd_handle_status_resp(can, p); |
1443 | if (can->can.state != CAN_STATE_BUS_OFF && |
1444 | can->can.state != CAN_STATE_ERROR_ACTIVE) { |
			mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1446 | } |
1447 | } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && |
1448 | !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) { |
1449 | /* Reset to bus on detected */ |
		if (!completion_done(&can->start_comp))
1451 | complete(&can->start_comp); |
1452 | } |
1453 | |
1454 | return 0; |
1455 | } |
1456 | |
1457 | static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, |
1458 | struct kvaser_pciefd_rx_packet *p) |
1459 | { |
1460 | struct sk_buff *skb; |
1461 | struct can_frame *cf; |
1462 | |
	skb = alloc_can_err_skb(can->can.dev, &cf);
1464 | can->can.dev->stats.tx_errors++; |
1465 | if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { |
1466 | if (skb) |
1467 | cf->can_id |= CAN_ERR_LOSTARB; |
1468 | can->can.can_stats.arbitration_lost++; |
1469 | } else if (skb) { |
1470 | cf->can_id |= CAN_ERR_ACK; |
1471 | } |
1472 | |
1473 | if (skb) { |
1474 | cf->can_id |= CAN_ERR_BUSERROR; |
1475 | kvaser_pciefd_set_skb_timestamp(pcie: can->kv_pcie, skb, timestamp: p->timestamp); |
1476 | netif_rx(skb); |
1477 | } else { |
1478 | can->can.dev->stats.rx_dropped++; |
1479 | netdev_warn(dev: can->can.dev, format: "No memory left for err_skb\n" ); |
1480 | } |
1481 | } |
1482 | |
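/* Handle a transmit acknowledgment: unless the packet was flushed, complete
 * the matching echo skb, update Tx statistics and wake the netdev queue once
 * there is room in the Tx FIFO again. NACKed packets are counted as one-shot
 * failures and reported via kvaser_pciefd_handle_nack_packet().
 */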
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
		int len;
		u8 count;
		struct sk_buff *skb;

		skb = can->can.echo_skb[echo_idx];
		if (skb)
			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			can->can.dev->stats.tx_bytes += len;
			can->can.dev->stats.tx_packets++;
		}
	}

	return 0;
}

static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

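/* Parse one packet from the DMA buffer at *start_pos (an index of 32-bit
 * words). Layout as consumed below: a size word, two header words, a 64-bit
 * timestamp and, for non-RTR data packets, the DLC-derived payload words.
 * A zero size word marks the end of the buffer; a size that does not match
 * the number of words actually consumed is treated as corruption (-EIO).
 */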
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
							    p->header[1]));
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the packet,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

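/* Drain one DMA buffer: keep parsing packets until a zero size word resets
 * the position, the end of the buffer is reached, or a parse error occurs.
 */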
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

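/* Shared receive buffer (SRB) interrupt: drain and re-arm DMA buffer 0 and 1
 * on their respective DPD interrupts, log DOF/DUF conditions as DMA errors,
 * then acknowledge the handled interrupt bits.
 */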
static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
}

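/* Per-channel KCAN interrupt: the error conditions (Tx/Rx FIFO overflow,
 * bittiming change outside reset mode, CAN FD frame in classic CAN mode)
 * are only logged here; the interrupt bits are acknowledged afterwards.
 */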
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming while not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
}

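/* Top-level PCI interrupt handler (shared line): dispatch the KCAN Rx IRQ to
 * the receive path and each channel's Tx IRQ to its per-channel handler,
 * based on the board-specific IRQ masks in driver_data.
 */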
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
	u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
	int i;

	if (!(board_irq & irq_mask->all))
		return IRQ_NONE;

	if (board_irq & irq_mask->kcan_rx0)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & irq_mask->kcan_tx[i])
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	return IRQ_HANDLED;
}

static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

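/* Probe: enable and map the PCI device, initialize the board and the DMA
 * buffers, set up the per-channel CAN controllers, hook up the shared
 * interrupt, enable SRB and PCI interrupts, arm both DMA buffers and finally
 * register the CAN devices. Error paths unwind in reverse order.
 */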
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	void __iomem *irq_en_base;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;
	pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
	irq_mask = pcie->driver_data->irq_mask;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);

	/* Enable PCI interrupts */
	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
	iowrite32(irq_mask->all, irq_en_base);
	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* Disable PCI interrupts */
	iowrite32(0, irq_en_base);
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Disable interrupts */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

	free_irq(pcie->pci->irq, pcie);

	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd);