1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
4 | * |
5 | * Author: Shlomi Gridish <gridish@freescale.com> |
6 | * Li Yang <leoli@freescale.com> |
7 | * |
8 | * Description: |
9 | * QE UCC Gigabit Ethernet Driver |
10 | */ |
11 | |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | |
14 | #include <linux/kernel.h> |
15 | #include <linux/init.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/stddef.h> |
19 | #include <linux/module.h> |
20 | #include <linux/interrupt.h> |
21 | #include <linux/netdevice.h> |
22 | #include <linux/etherdevice.h> |
23 | #include <linux/skbuff.h> |
24 | #include <linux/spinlock.h> |
25 | #include <linux/mm.h> |
26 | #include <linux/dma-mapping.h> |
27 | #include <linux/mii.h> |
28 | #include <linux/phy.h> |
29 | #include <linux/phy_fixed.h> |
30 | #include <linux/workqueue.h> |
31 | #include <linux/of.h> |
32 | #include <linux/of_address.h> |
33 | #include <linux/of_irq.h> |
34 | #include <linux/of_mdio.h> |
35 | #include <linux/of_net.h> |
36 | #include <linux/platform_device.h> |
37 | |
38 | #include <linux/uaccess.h> |
39 | #include <asm/irq.h> |
40 | #include <asm/io.h> |
41 | #include <soc/fsl/qe/immap_qe.h> |
42 | #include <soc/fsl/qe/qe.h> |
43 | #include <soc/fsl/qe/ucc.h> |
44 | #include <soc/fsl/qe/ucc_fast.h> |
45 | #include <asm/machdep.h> |
46 | |
47 | #include "ucc_geth.h" |
48 | |
49 | #undef DEBUG |
50 | |
51 | #define ugeth_printk(level, format, arg...) \ |
52 | printk(level format "\n", ## arg) |
53 | |
54 | #define ugeth_dbg(format, arg...) \ |
55 | ugeth_printk(KERN_DEBUG , format , ## arg) |
56 | |
57 | #ifdef UGETH_VERBOSE_DEBUG |
58 | #define ugeth_vdbg ugeth_dbg |
59 | #else |
60 | #define ugeth_vdbg(fmt, args...) do { } while (0) |
61 | #endif /* UGETH_VERBOSE_DEBUG */ |
#define UGETH_MSG_DEFAULT	((NETIF_MSG_IFUP << 1) - 1)
63 | |
64 | |
65 | static DEFINE_SPINLOCK(ugeth_lock); |
66 | |
67 | static struct { |
68 | u32 msg_enable; |
69 | } debug = { -1 }; |
70 | |
71 | module_param_named(debug, debug.msg_enable, int, 0); |
72 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)" ); |
73 | |
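/* Map a ucc_geth_num_of_threads enum value to the number of RISC threads it
 * selects; returns 0 for an out-of-range index.
 */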
74 | static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx) |
75 | { |
76 | static const u8 count[] = { |
77 | [UCC_GETH_NUM_OF_THREADS_1] = 1, |
78 | [UCC_GETH_NUM_OF_THREADS_2] = 2, |
79 | [UCC_GETH_NUM_OF_THREADS_4] = 4, |
80 | [UCC_GETH_NUM_OF_THREADS_6] = 6, |
81 | [UCC_GETH_NUM_OF_THREADS_8] = 8, |
82 | }; |
83 | if (idx >= ARRAY_SIZE(count)) |
84 | return 0; |
85 | return count[idx]; |
86 | } |
87 | |
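/* The driver currently uses a single Tx queue and a single Rx queue per UCC. */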
88 | static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info) |
89 | { |
90 | return 1; |
91 | } |
92 | |
93 | static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info) |
94 | { |
95 | return 1; |
96 | } |
97 | |
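/* Default (template) configuration for a UCC Geth instance.  Individual
 * fields may be adjusted at startup, e.g. the FIFO settings when the
 * max-speed is 1000.
 */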
98 | static const struct ucc_geth_info ugeth_primary_info = { |
99 | .uf_info = { |
100 | .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, |
101 | .max_rx_buf_length = 1536, |
102 | /* adjusted at startup if max-speed 1000 */ |
103 | .urfs = UCC_GETH_URFS_INIT, |
104 | .urfet = UCC_GETH_URFET_INIT, |
105 | .urfset = UCC_GETH_URFSET_INIT, |
106 | .utfs = UCC_GETH_UTFS_INIT, |
107 | .utfet = UCC_GETH_UTFET_INIT, |
108 | .utftt = UCC_GETH_UTFTT_INIT, |
109 | .ufpt = 256, |
110 | .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, |
111 | .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, |
112 | .tenc = UCC_FAST_TX_ENCODING_NRZ, |
113 | .renc = UCC_FAST_RX_ENCODING_NRZ, |
114 | .tcrc = UCC_FAST_16_BIT_CRC, |
115 | .synl = UCC_FAST_SYNC_LEN_NOT_USED, |
116 | }, |
117 | .extendedFilteringChainPointer = ((uint32_t) NULL), |
118 | .typeorlen = 3072 /*1536 */ , |
119 | .nonBackToBackIfgPart1 = 0x40, |
120 | .nonBackToBackIfgPart2 = 0x60, |
121 | .miminumInterFrameGapEnforcement = 0x50, |
122 | .backToBackInterFrameGap = 0x60, |
123 | .mblinterval = 128, |
124 | .nortsrbytetime = 5, |
125 | .fracsiz = 1, |
126 | .strictpriorityq = 0xff, |
127 | .altBebTruncation = 0xa, |
128 | .excessDefer = 1, |
129 | .maxRetransmission = 0xf, |
130 | .collisionWindow = 0x37, |
131 | .receiveFlowControl = 1, |
132 | .transmitFlowControl = 1, |
133 | .maxGroupAddrInHash = 4, |
134 | .maxIndAddrInHash = 4, |
135 | .prel = 7, |
136 | .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ |
137 | .minFrameLength = 64, |
138 | .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */ |
139 | .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */ |
140 | .vlantype = 0x8100, |
141 | .ecamptr = ((uint32_t) NULL), |
142 | .eventRegMask = UCCE_OTHER, |
143 | .pausePeriod = 0xf000, |
144 | .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, |
145 | .bdRingLenTx = { |
146 | TX_BD_RING_LEN, |
147 | TX_BD_RING_LEN, |
148 | TX_BD_RING_LEN, |
149 | TX_BD_RING_LEN, |
150 | TX_BD_RING_LEN, |
151 | TX_BD_RING_LEN, |
152 | TX_BD_RING_LEN, |
153 | TX_BD_RING_LEN}, |
154 | |
155 | .bdRingLenRx = { |
156 | RX_BD_RING_LEN, |
157 | RX_BD_RING_LEN, |
158 | RX_BD_RING_LEN, |
159 | RX_BD_RING_LEN, |
160 | RX_BD_RING_LEN, |
161 | RX_BD_RING_LEN, |
162 | RX_BD_RING_LEN, |
163 | RX_BD_RING_LEN}, |
164 | |
165 | .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, |
166 | .largestexternallookupkeysize = |
167 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, |
168 | .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | |
169 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | |
170 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, |
171 | .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, |
172 | .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, |
173 | .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, |
174 | .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, |
175 | .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, |
176 | .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, |
177 | .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, |
178 | .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, |
179 | .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, |
180 | }; |
181 | |
182 | #ifdef DEBUG |
183 | static void mem_disp(u8 *addr, int size) |
184 | { |
185 | u8 *i; |
186 | int size16Aling = (size >> 4) << 4; |
187 | int size4Aling = (size >> 2) << 2; |
188 | int notAlign = 0; |
189 | if (size % 16) |
190 | notAlign = 1; |
191 | |
192 | for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) |
193 | printk("0x%08x: %08x %08x %08x %08x\r\n" , |
194 | (u32) i, |
195 | *((u32 *) (i)), |
196 | *((u32 *) (i + 4)), |
197 | *((u32 *) (i + 8)), *((u32 *) (i + 12))); |
198 | if (notAlign == 1) |
199 | printk("0x%08x: " , (u32) i); |
200 | for (; (u32) i < (u32) addr + size4Aling; i += 4) |
201 | printk("%08x " , *((u32 *) (i))); |
202 | for (; (u32) i < (u32) addr + size; i++) |
203 | printk("%02x" , *((i))); |
204 | if (notAlign == 1) |
205 | printk("\r\n" ); |
206 | } |
207 | #endif /* DEBUG */ |
208 | |
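/* Atomically remove and return the first node of @lh, or NULL if the list is
 * empty.  Serialized by ugeth_lock.
 */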
209 | static struct list_head *dequeue(struct list_head *lh) |
210 | { |
211 | unsigned long flags; |
212 | |
213 | spin_lock_irqsave(&ugeth_lock, flags); |
	if (!list_empty(lh)) {
215 | struct list_head *node = lh->next; |
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
218 | return node; |
219 | } else { |
		spin_unlock_irqrestore(&ugeth_lock, flags);
221 | return NULL; |
222 | } |
223 | } |
224 | |
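/* Allocate an Rx skb, align its data buffer, map it for DMA and attach it to
 * the buffer descriptor @bd, setting R_E and R_I while preserving the wrap
 * (R_W) bit.
 */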
225 | static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, |
226 | u8 __iomem *bd) |
227 | { |
228 | struct sk_buff *skb; |
229 | |
	skb = netdev_alloc_skb(ugeth->ndev,
			       ugeth->ug_info->uf_info.max_rx_buf_length +
232 | UCC_GETH_RX_DATA_BUF_ALIGNMENT); |
233 | if (!skb) |
234 | return NULL; |
235 | |
236 | /* We need the data buffer to be aligned properly. We will reserve |
237 | * as many bytes as needed to align the data properly |
238 | */ |
239 | skb_reserve(skb, |
240 | UCC_GETH_RX_DATA_BUF_ALIGNMENT - |
241 | (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - |
242 | 1))); |
243 | |
244 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
245 | dma_map_single(ugeth->dev, |
246 | skb->data, |
247 | ugeth->ug_info->uf_info.max_rx_buf_length + |
248 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, |
249 | DMA_FROM_DEVICE)); |
250 | |
251 | out_be32((u32 __iomem *)bd, |
252 | (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); |
253 | |
254 | return skb; |
255 | } |
256 | |
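/* Attach a freshly allocated skb to every BD of Rx ring @rxQ; the walk stops
 * after the BD carrying the wrap (R_W) bit.
 */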
257 | static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) |
258 | { |
259 | u8 __iomem *bd; |
260 | u32 bd_status; |
261 | struct sk_buff *skb; |
262 | int i; |
263 | |
264 | bd = ugeth->p_rx_bd_ring[rxQ]; |
265 | i = 0; |
266 | |
267 | do { |
268 | bd_status = in_be32((u32 __iomem *)bd); |
269 | skb = get_new_skb(ugeth, bd); |
270 | |
271 | if (!skb) /* If can not allocate data buffer, |
272 | abort. Cleanup will be elsewhere */ |
273 | return -ENOMEM; |
274 | |
275 | ugeth->rx_skbuff[rxQ][i] = skb; |
276 | |
277 | /* advance the BD pointer */ |
278 | bd += sizeof(struct qe_bd); |
279 | i++; |
280 | } while (!(bd_status & R_W)); |
281 | |
282 | return 0; |
283 | } |
284 | |
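/* Build the init-enet entries handed to the QE: for each entry claim an SNUM
 * and (except, optionally, for the first Rx entry) a MURAM area of
 * @thread_size bytes, then encode SNUM, offset and RISC allocation into
 * *p_start.
 */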
285 | static int fill_init_enet_entries(struct ucc_geth_private *ugeth, |
286 | u32 *p_start, |
287 | u8 num_entries, |
288 | u32 thread_size, |
289 | u32 thread_alignment, |
290 | unsigned int risc, |
291 | int skip_page_for_first_entry) |
292 | { |
293 | u32 init_enet_offset; |
294 | u8 i; |
295 | int snum; |
296 | |
297 | for (i = 0; i < num_entries; i++) { |
298 | if ((snum = qe_get_snum()) < 0) { |
299 | if (netif_msg_ifup(ugeth)) |
300 | pr_err("Can not get SNUM\n" ); |
301 | return snum; |
302 | } |
303 | if ((i == 0) && skip_page_for_first_entry) |
304 | /* First entry of Rx does not have page */ |
305 | init_enet_offset = 0; |
306 | else { |
307 | init_enet_offset = |
			    qe_muram_alloc(thread_size, thread_alignment);
309 | if (IS_ERR_VALUE(init_enet_offset)) { |
310 | if (netif_msg_ifup(ugeth)) |
311 | pr_err("Can not allocate DPRAM memory\n" ); |
				qe_put_snum((u8) snum);
313 | return -ENOMEM; |
314 | } |
315 | } |
316 | *(p_start++) = |
317 | ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset |
318 | | risc; |
319 | } |
320 | |
321 | return 0; |
322 | } |
323 | |
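/* Undo fill_init_enet_entries(): release the SNUMs and MURAM areas of all
 * entries that were actually filled in.
 */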
324 | static int return_init_enet_entries(struct ucc_geth_private *ugeth, |
325 | u32 *p_start, |
326 | u8 num_entries, |
327 | unsigned int risc, |
328 | int skip_page_for_first_entry) |
329 | { |
330 | u32 init_enet_offset; |
331 | u8 i; |
332 | int snum; |
333 | |
334 | for (i = 0; i < num_entries; i++) { |
335 | u32 val = *p_start; |
336 | |
337 | /* Check that this entry was actually valid -- |
338 | needed in case failed in allocations */ |
339 | if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { |
340 | snum = |
341 | (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> |
342 | ENET_INIT_PARAM_SNUM_SHIFT; |
			qe_put_snum((u8) snum);
344 | if (!((i == 0) && skip_page_for_first_entry)) { |
345 | /* First entry of Rx does not have page */ |
346 | init_enet_offset = |
347 | (val & ENET_INIT_PARAM_PTR_MASK); |
				qe_muram_free(init_enet_offset);
349 | } |
350 | *p_start++ = 0; |
351 | } |
352 | } |
353 | |
354 | return 0; |
355 | } |
356 | |
357 | #ifdef DEBUG |
358 | static int dump_init_enet_entries(struct ucc_geth_private *ugeth, |
359 | u32 __iomem *p_start, |
360 | u8 num_entries, |
361 | u32 thread_size, |
362 | unsigned int risc, |
363 | int skip_page_for_first_entry) |
364 | { |
365 | u32 init_enet_offset; |
366 | u8 i; |
367 | int snum; |
368 | |
369 | for (i = 0; i < num_entries; i++) { |
370 | u32 val = in_be32(p_start); |
371 | |
372 | /* Check that this entry was actually valid -- |
373 | needed in case failed in allocations */ |
374 | if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { |
375 | snum = |
376 | (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> |
377 | ENET_INIT_PARAM_SNUM_SHIFT; |
378 | qe_put_snum((u8) snum); |
379 | if (!((i == 0) && skip_page_for_first_entry)) { |
380 | /* First entry of Rx does not have page */ |
381 | init_enet_offset = |
382 | (in_be32(p_start) & |
383 | ENET_INIT_PARAM_PTR_MASK); |
384 | pr_info("Init enet entry %d:\n" , i); |
385 | pr_info("Base address: 0x%08x\n" , |
386 | (u32)qe_muram_addr(init_enet_offset)); |
387 | mem_disp(qe_muram_addr(init_enet_offset), |
388 | thread_size); |
389 | } |
390 | p_start++; |
391 | } |
392 | } |
393 | |
394 | return 0; |
395 | } |
396 | #endif |
397 | |
398 | static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) |
399 | { |
	kfree(enet_addr_cont);
401 | } |
402 | |
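/* Write a MAC address into three 16-bit registers in byte-reversed order
 * (mac[5] first), the layout expected by the address-filtering parameter RAM.
 */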
403 | static void set_mac_addr(__be16 __iomem *reg, u8 *mac) |
404 | { |
405 | out_be16(®[0], ((u16)mac[5] << 8) | mac[4]); |
406 | out_be16(®[1], ((u16)mac[3] << 8) | mac[2]); |
407 | out_be16(®[2], ((u16)mac[1] << 8) | mac[0]); |
408 | } |
409 | |
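/* Disable exact-match address slot @paddr_num by programming the all-ones
 * address ff:ff:ff:ff:ff:ff, which the hardware treats as "no match".
 */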
410 | static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) |
411 | { |
412 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
413 | |
414 | if (paddr_num >= NUM_OF_PADDRS) { |
415 | pr_warn("%s: Invalid paddr_num: %u\n" , __func__, paddr_num); |
416 | return -EINVAL; |
417 | } |
418 | |
419 | p_82xx_addr_filt = |
420 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> |
421 | addressfiltering; |
422 | |
423 | /* Writing address ff.ff.ff.ff.ff.ff disables address |
424 | recognition for this register */ |
425 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); |
426 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); |
427 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); |
428 | |
429 | return 0; |
430 | } |
431 | |
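/* Add @p_enet_addr to the group-address hash table via the QE
 * SET GROUP ADDRESS command.
 */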
432 | static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, |
433 | u8 *p_enet_addr) |
434 | { |
435 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
436 | u32 cecr_subblock; |
437 | |
438 | p_82xx_addr_filt = |
439 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> |
440 | addressfiltering; |
441 | |
442 | cecr_subblock = |
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
444 | |
	/* Ethernet frames are defined in Little Endian mode, therefore to
	 * insert the address to the hash (Big Endian mode), we reverse the
	 * bytes.
	 */
448 | |
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
450 | |
	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
453 | } |
454 | |
455 | #ifdef DEBUG |
456 | static void get_statistics(struct ucc_geth_private *ugeth, |
457 | struct ucc_geth_tx_firmware_statistics * |
458 | tx_firmware_statistics, |
459 | struct ucc_geth_rx_firmware_statistics * |
460 | rx_firmware_statistics, |
461 | struct ucc_geth_hardware_statistics *hardware_statistics) |
462 | { |
463 | struct ucc_fast __iomem *uf_regs; |
464 | struct ucc_geth __iomem *ug_regs; |
465 | struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; |
466 | struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; |
467 | |
468 | ug_regs = ugeth->ug_regs; |
469 | uf_regs = (struct ucc_fast __iomem *) ug_regs; |
470 | p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; |
471 | p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; |
472 | |
473 | /* Tx firmware only if user handed pointer and driver actually |
474 | gathers Tx firmware statistics */ |
475 | if (tx_firmware_statistics && p_tx_fw_statistics_pram) { |
476 | tx_firmware_statistics->sicoltx = |
477 | in_be32(&p_tx_fw_statistics_pram->sicoltx); |
478 | tx_firmware_statistics->mulcoltx = |
479 | in_be32(&p_tx_fw_statistics_pram->mulcoltx); |
480 | tx_firmware_statistics->latecoltxfr = |
481 | in_be32(&p_tx_fw_statistics_pram->latecoltxfr); |
482 | tx_firmware_statistics->frabortduecol = |
483 | in_be32(&p_tx_fw_statistics_pram->frabortduecol); |
484 | tx_firmware_statistics->frlostinmactxer = |
485 | in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); |
486 | tx_firmware_statistics->carriersenseertx = |
487 | in_be32(&p_tx_fw_statistics_pram->carriersenseertx); |
488 | tx_firmware_statistics->frtxok = |
489 | in_be32(&p_tx_fw_statistics_pram->frtxok); |
490 | tx_firmware_statistics->txfrexcessivedefer = |
491 | in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); |
492 | tx_firmware_statistics->txpkts256 = |
493 | in_be32(&p_tx_fw_statistics_pram->txpkts256); |
494 | tx_firmware_statistics->txpkts512 = |
495 | in_be32(&p_tx_fw_statistics_pram->txpkts512); |
496 | tx_firmware_statistics->txpkts1024 = |
497 | in_be32(&p_tx_fw_statistics_pram->txpkts1024); |
498 | tx_firmware_statistics->txpktsjumbo = |
499 | in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); |
500 | } |
501 | |
502 | /* Rx firmware only if user handed pointer and driver actually |
503 | * gathers Rx firmware statistics */ |
504 | if (rx_firmware_statistics && p_rx_fw_statistics_pram) { |
505 | int i; |
506 | rx_firmware_statistics->frrxfcser = |
507 | in_be32(&p_rx_fw_statistics_pram->frrxfcser); |
508 | rx_firmware_statistics->fraligner = |
509 | in_be32(&p_rx_fw_statistics_pram->fraligner); |
510 | rx_firmware_statistics->inrangelenrxer = |
511 | in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); |
512 | rx_firmware_statistics->outrangelenrxer = |
513 | in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); |
514 | rx_firmware_statistics->frtoolong = |
515 | in_be32(&p_rx_fw_statistics_pram->frtoolong); |
516 | rx_firmware_statistics->runt = |
517 | in_be32(&p_rx_fw_statistics_pram->runt); |
518 | rx_firmware_statistics->verylongevent = |
519 | in_be32(&p_rx_fw_statistics_pram->verylongevent); |
520 | rx_firmware_statistics->symbolerror = |
521 | in_be32(&p_rx_fw_statistics_pram->symbolerror); |
522 | rx_firmware_statistics->dropbsy = |
523 | in_be32(&p_rx_fw_statistics_pram->dropbsy); |
524 | for (i = 0; i < 0x8; i++) |
525 | rx_firmware_statistics->res0[i] = |
526 | p_rx_fw_statistics_pram->res0[i]; |
527 | rx_firmware_statistics->mismatchdrop = |
528 | in_be32(&p_rx_fw_statistics_pram->mismatchdrop); |
529 | rx_firmware_statistics->underpkts = |
530 | in_be32(&p_rx_fw_statistics_pram->underpkts); |
531 | rx_firmware_statistics->pkts256 = |
532 | in_be32(&p_rx_fw_statistics_pram->pkts256); |
533 | rx_firmware_statistics->pkts512 = |
534 | in_be32(&p_rx_fw_statistics_pram->pkts512); |
535 | rx_firmware_statistics->pkts1024 = |
536 | in_be32(&p_rx_fw_statistics_pram->pkts1024); |
537 | rx_firmware_statistics->pktsjumbo = |
538 | in_be32(&p_rx_fw_statistics_pram->pktsjumbo); |
539 | rx_firmware_statistics->frlossinmacer = |
540 | in_be32(&p_rx_fw_statistics_pram->frlossinmacer); |
541 | rx_firmware_statistics->pausefr = |
542 | in_be32(&p_rx_fw_statistics_pram->pausefr); |
543 | for (i = 0; i < 0x4; i++) |
544 | rx_firmware_statistics->res1[i] = |
545 | p_rx_fw_statistics_pram->res1[i]; |
546 | rx_firmware_statistics->removevlan = |
547 | in_be32(&p_rx_fw_statistics_pram->removevlan); |
548 | rx_firmware_statistics->replacevlan = |
549 | in_be32(&p_rx_fw_statistics_pram->replacevlan); |
550 | rx_firmware_statistics->insertvlan = |
551 | in_be32(&p_rx_fw_statistics_pram->insertvlan); |
552 | } |
553 | |
554 | /* Hardware only if user handed pointer and driver actually |
555 | gathers hardware statistics */ |
556 | if (hardware_statistics && |
557 | (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { |
558 | hardware_statistics->tx64 = in_be32(&ug_regs->tx64); |
559 | hardware_statistics->tx127 = in_be32(&ug_regs->tx127); |
560 | hardware_statistics->tx255 = in_be32(&ug_regs->tx255); |
561 | hardware_statistics->rx64 = in_be32(&ug_regs->rx64); |
562 | hardware_statistics->rx127 = in_be32(&ug_regs->rx127); |
563 | hardware_statistics->rx255 = in_be32(&ug_regs->rx255); |
564 | hardware_statistics->txok = in_be32(&ug_regs->txok); |
565 | hardware_statistics->txcf = in_be16(&ug_regs->txcf); |
566 | hardware_statistics->tmca = in_be32(&ug_regs->tmca); |
567 | hardware_statistics->tbca = in_be32(&ug_regs->tbca); |
568 | hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); |
569 | hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); |
570 | hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); |
571 | hardware_statistics->rmca = in_be32(&ug_regs->rmca); |
572 | hardware_statistics->rbca = in_be32(&ug_regs->rbca); |
573 | } |
574 | } |
575 | |
576 | static void dump_bds(struct ucc_geth_private *ugeth) |
577 | { |
578 | int i; |
579 | int length; |
580 | |
581 | for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) { |
582 | if (ugeth->p_tx_bd_ring[i]) { |
583 | length = |
584 | (ugeth->ug_info->bdRingLenTx[i] * |
585 | sizeof(struct qe_bd)); |
586 | pr_info("TX BDs[%d]\n" , i); |
587 | mem_disp(ugeth->p_tx_bd_ring[i], length); |
588 | } |
589 | } |
590 | for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) { |
591 | if (ugeth->p_rx_bd_ring[i]) { |
592 | length = |
593 | (ugeth->ug_info->bdRingLenRx[i] * |
594 | sizeof(struct qe_bd)); |
595 | pr_info("RX BDs[%d]\n" , i); |
596 | mem_disp(ugeth->p_rx_bd_ring[i], length); |
597 | } |
598 | } |
599 | } |
600 | |
601 | static void dump_regs(struct ucc_geth_private *ugeth) |
602 | { |
603 | int i; |
604 | |
605 | pr_info("UCC%d Geth registers:\n" , ugeth->ug_info->uf_info.ucc_num + 1); |
606 | pr_info("Base address: 0x%08x\n" , (u32)ugeth->ug_regs); |
607 | |
608 | pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n" , |
609 | (u32)&ugeth->ug_regs->maccfg1, |
610 | in_be32(&ugeth->ug_regs->maccfg1)); |
611 | pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n" , |
612 | (u32)&ugeth->ug_regs->maccfg2, |
613 | in_be32(&ugeth->ug_regs->maccfg2)); |
614 | pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n" , |
615 | (u32)&ugeth->ug_regs->ipgifg, |
616 | in_be32(&ugeth->ug_regs->ipgifg)); |
617 | pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n" , |
618 | (u32)&ugeth->ug_regs->hafdup, |
619 | in_be32(&ugeth->ug_regs->hafdup)); |
620 | pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n" , |
621 | (u32)&ugeth->ug_regs->ifctl, |
622 | in_be32(&ugeth->ug_regs->ifctl)); |
623 | pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n" , |
624 | (u32)&ugeth->ug_regs->ifstat, |
625 | in_be32(&ugeth->ug_regs->ifstat)); |
626 | pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n" , |
627 | (u32)&ugeth->ug_regs->macstnaddr1, |
628 | in_be32(&ugeth->ug_regs->macstnaddr1)); |
629 | pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n" , |
630 | (u32)&ugeth->ug_regs->macstnaddr2, |
631 | in_be32(&ugeth->ug_regs->macstnaddr2)); |
632 | pr_info("uempr : addr - 0x%08x, val - 0x%08x\n" , |
633 | (u32)&ugeth->ug_regs->uempr, |
634 | in_be32(&ugeth->ug_regs->uempr)); |
635 | pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n" , |
636 | (u32)&ugeth->ug_regs->utbipar, |
637 | in_be32(&ugeth->ug_regs->utbipar)); |
638 | pr_info("uescr : addr - 0x%08x, val - 0x%04x\n" , |
639 | (u32)&ugeth->ug_regs->uescr, |
640 | in_be16(&ugeth->ug_regs->uescr)); |
641 | pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n" , |
642 | (u32)&ugeth->ug_regs->tx64, |
643 | in_be32(&ugeth->ug_regs->tx64)); |
644 | pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n" , |
645 | (u32)&ugeth->ug_regs->tx127, |
646 | in_be32(&ugeth->ug_regs->tx127)); |
647 | pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n" , |
648 | (u32)&ugeth->ug_regs->tx255, |
649 | in_be32(&ugeth->ug_regs->tx255)); |
650 | pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n" , |
651 | (u32)&ugeth->ug_regs->rx64, |
652 | in_be32(&ugeth->ug_regs->rx64)); |
653 | pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n" , |
654 | (u32)&ugeth->ug_regs->rx127, |
655 | in_be32(&ugeth->ug_regs->rx127)); |
656 | pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n" , |
657 | (u32)&ugeth->ug_regs->rx255, |
658 | in_be32(&ugeth->ug_regs->rx255)); |
659 | pr_info("txok : addr - 0x%08x, val - 0x%08x\n" , |
660 | (u32)&ugeth->ug_regs->txok, |
661 | in_be32(&ugeth->ug_regs->txok)); |
662 | pr_info("txcf : addr - 0x%08x, val - 0x%04x\n" , |
663 | (u32)&ugeth->ug_regs->txcf, |
664 | in_be16(&ugeth->ug_regs->txcf)); |
665 | pr_info("tmca : addr - 0x%08x, val - 0x%08x\n" , |
666 | (u32)&ugeth->ug_regs->tmca, |
667 | in_be32(&ugeth->ug_regs->tmca)); |
668 | pr_info("tbca : addr - 0x%08x, val - 0x%08x\n" , |
669 | (u32)&ugeth->ug_regs->tbca, |
670 | in_be32(&ugeth->ug_regs->tbca)); |
671 | pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n" , |
672 | (u32)&ugeth->ug_regs->rxfok, |
673 | in_be32(&ugeth->ug_regs->rxfok)); |
674 | pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n" , |
675 | (u32)&ugeth->ug_regs->rxbok, |
676 | in_be32(&ugeth->ug_regs->rxbok)); |
677 | pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n" , |
678 | (u32)&ugeth->ug_regs->rbyt, |
679 | in_be32(&ugeth->ug_regs->rbyt)); |
680 | pr_info("rmca : addr - 0x%08x, val - 0x%08x\n" , |
681 | (u32)&ugeth->ug_regs->rmca, |
682 | in_be32(&ugeth->ug_regs->rmca)); |
683 | pr_info("rbca : addr - 0x%08x, val - 0x%08x\n" , |
684 | (u32)&ugeth->ug_regs->rbca, |
685 | in_be32(&ugeth->ug_regs->rbca)); |
686 | pr_info("scar : addr - 0x%08x, val - 0x%08x\n" , |
687 | (u32)&ugeth->ug_regs->scar, |
688 | in_be32(&ugeth->ug_regs->scar)); |
689 | pr_info("scam : addr - 0x%08x, val - 0x%08x\n" , |
690 | (u32)&ugeth->ug_regs->scam, |
691 | in_be32(&ugeth->ug_regs->scam)); |
692 | |
693 | if (ugeth->p_thread_data_tx) { |
694 | int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx); |
695 | |
696 | pr_info("Thread data TXs:\n" ); |
697 | pr_info("Base address: 0x%08x\n" , |
698 | (u32)ugeth->p_thread_data_tx); |
699 | for (i = 0; i < count; i++) { |
700 | pr_info("Thread data TX[%d]:\n" , i); |
701 | pr_info("Base address: 0x%08x\n" , |
702 | (u32)&ugeth->p_thread_data_tx[i]); |
703 | mem_disp((u8 *) & ugeth->p_thread_data_tx[i], |
704 | sizeof(struct ucc_geth_thread_data_tx)); |
705 | } |
706 | } |
707 | if (ugeth->p_thread_data_rx) { |
708 | int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx); |
709 | |
710 | pr_info("Thread data RX:\n" ); |
711 | pr_info("Base address: 0x%08x\n" , |
712 | (u32)ugeth->p_thread_data_rx); |
713 | for (i = 0; i < count; i++) { |
714 | pr_info("Thread data RX[%d]:\n" , i); |
715 | pr_info("Base address: 0x%08x\n" , |
716 | (u32)&ugeth->p_thread_data_rx[i]); |
717 | mem_disp((u8 *) & ugeth->p_thread_data_rx[i], |
718 | sizeof(struct ucc_geth_thread_data_rx)); |
719 | } |
720 | } |
721 | if (ugeth->p_exf_glbl_param) { |
722 | pr_info("EXF global param:\n" ); |
723 | pr_info("Base address: 0x%08x\n" , |
724 | (u32)ugeth->p_exf_glbl_param); |
725 | mem_disp((u8 *) ugeth->p_exf_glbl_param, |
726 | sizeof(*ugeth->p_exf_glbl_param)); |
727 | } |
728 | if (ugeth->p_tx_glbl_pram) { |
729 | pr_info("TX global param:\n" ); |
730 | pr_info("Base address: 0x%08x\n" , (u32)ugeth->p_tx_glbl_pram); |
731 | pr_info("temoder : addr - 0x%08x, val - 0x%04x\n" , |
732 | (u32)&ugeth->p_tx_glbl_pram->temoder, |
733 | in_be16(&ugeth->p_tx_glbl_pram->temoder)); |
734 | pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n" , |
735 | (u32)&ugeth->p_tx_glbl_pram->sqptr, |
736 | in_be32(&ugeth->p_tx_glbl_pram->sqptr)); |
737 | pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n" , |
738 | (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer, |
739 | in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer)); |
740 | pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n" , |
741 | (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr, |
742 | in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); |
743 | pr_info("tstate : addr - 0x%08x, val - 0x%08x\n" , |
744 | (u32)&ugeth->p_tx_glbl_pram->tstate, |
745 | in_be32(&ugeth->p_tx_glbl_pram->tstate)); |
746 | pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n" , |
747 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[0], |
748 | ugeth->p_tx_glbl_pram->iphoffset[0]); |
749 | pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n" , |
750 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[1], |
751 | ugeth->p_tx_glbl_pram->iphoffset[1]); |
752 | pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n" , |
753 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[2], |
754 | ugeth->p_tx_glbl_pram->iphoffset[2]); |
755 | pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n" , |
756 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[3], |
757 | ugeth->p_tx_glbl_pram->iphoffset[3]); |
758 | pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n" , |
759 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[4], |
760 | ugeth->p_tx_glbl_pram->iphoffset[4]); |
761 | pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n" , |
762 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[5], |
763 | ugeth->p_tx_glbl_pram->iphoffset[5]); |
764 | pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n" , |
765 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[6], |
766 | ugeth->p_tx_glbl_pram->iphoffset[6]); |
767 | pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n" , |
768 | (u32)&ugeth->p_tx_glbl_pram->iphoffset[7], |
769 | ugeth->p_tx_glbl_pram->iphoffset[7]); |
770 | pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n" , |
771 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[0], |
772 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); |
773 | pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n" , |
774 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[1], |
775 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); |
776 | pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n" , |
777 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[2], |
778 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); |
779 | pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n" , |
780 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[3], |
781 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); |
782 | pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n" , |
783 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[4], |
784 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); |
785 | pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n" , |
786 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[5], |
787 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); |
788 | pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n" , |
789 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[6], |
790 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); |
791 | pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n" , |
792 | (u32)&ugeth->p_tx_glbl_pram->vtagtable[7], |
793 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); |
794 | pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n" , |
795 | (u32)&ugeth->p_tx_glbl_pram->tqptr, |
796 | in_be32(&ugeth->p_tx_glbl_pram->tqptr)); |
797 | } |
798 | if (ugeth->p_rx_glbl_pram) { |
799 | pr_info("RX global param:\n" ); |
800 | pr_info("Base address: 0x%08x\n" , (u32)ugeth->p_rx_glbl_pram); |
801 | pr_info("remoder : addr - 0x%08x, val - 0x%08x\n" , |
802 | (u32)&ugeth->p_rx_glbl_pram->remoder, |
803 | in_be32(&ugeth->p_rx_glbl_pram->remoder)); |
804 | pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n" , |
805 | (u32)&ugeth->p_rx_glbl_pram->rqptr, |
806 | in_be32(&ugeth->p_rx_glbl_pram->rqptr)); |
807 | pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n" , |
808 | (u32)&ugeth->p_rx_glbl_pram->typeorlen, |
809 | in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); |
810 | pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n" , |
811 | (u32)&ugeth->p_rx_glbl_pram->rxgstpack, |
812 | ugeth->p_rx_glbl_pram->rxgstpack); |
813 | pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n" , |
814 | (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr, |
815 | in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); |
816 | pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n" , |
817 | (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr, |
818 | in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); |
819 | pr_info("rstate : addr - 0x%08x, val - 0x%02x\n" , |
820 | (u32)&ugeth->p_rx_glbl_pram->rstate, |
821 | ugeth->p_rx_glbl_pram->rstate); |
822 | pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n" , |
823 | (u32)&ugeth->p_rx_glbl_pram->mrblr, |
824 | in_be16(&ugeth->p_rx_glbl_pram->mrblr)); |
825 | pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n" , |
826 | (u32)&ugeth->p_rx_glbl_pram->rbdqptr, |
827 | in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); |
828 | pr_info("mflr : addr - 0x%08x, val - 0x%04x\n" , |
829 | (u32)&ugeth->p_rx_glbl_pram->mflr, |
830 | in_be16(&ugeth->p_rx_glbl_pram->mflr)); |
831 | pr_info("minflr : addr - 0x%08x, val - 0x%04x\n" , |
832 | (u32)&ugeth->p_rx_glbl_pram->minflr, |
833 | in_be16(&ugeth->p_rx_glbl_pram->minflr)); |
834 | pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n" , |
835 | (u32)&ugeth->p_rx_glbl_pram->maxd1, |
836 | in_be16(&ugeth->p_rx_glbl_pram->maxd1)); |
837 | pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n" , |
838 | (u32)&ugeth->p_rx_glbl_pram->maxd2, |
839 | in_be16(&ugeth->p_rx_glbl_pram->maxd2)); |
840 | pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n" , |
841 | (u32)&ugeth->p_rx_glbl_pram->ecamptr, |
842 | in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); |
843 | pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n" , |
844 | (u32)&ugeth->p_rx_glbl_pram->l2qt, |
845 | in_be32(&ugeth->p_rx_glbl_pram->l2qt)); |
846 | pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n" , |
847 | (u32)&ugeth->p_rx_glbl_pram->l3qt[0], |
848 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); |
849 | pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n" , |
850 | (u32)&ugeth->p_rx_glbl_pram->l3qt[1], |
851 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); |
852 | pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n" , |
853 | (u32)&ugeth->p_rx_glbl_pram->l3qt[2], |
854 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); |
855 | pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n" , |
856 | (u32)&ugeth->p_rx_glbl_pram->l3qt[3], |
857 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); |
858 | pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n" , |
859 | (u32)&ugeth->p_rx_glbl_pram->l3qt[4], |
860 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); |
861 | pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n" , |
862 | (u32)&ugeth->p_rx_glbl_pram->l3qt[5], |
863 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); |
864 | pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n" , |
865 | (u32)&ugeth->p_rx_glbl_pram->l3qt[6], |
866 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); |
867 | pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n" , |
868 | (u32)&ugeth->p_rx_glbl_pram->l3qt[7], |
869 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); |
870 | pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n" , |
871 | (u32)&ugeth->p_rx_glbl_pram->vlantype, |
872 | in_be16(&ugeth->p_rx_glbl_pram->vlantype)); |
873 | pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n" , |
874 | (u32)&ugeth->p_rx_glbl_pram->vlantci, |
875 | in_be16(&ugeth->p_rx_glbl_pram->vlantci)); |
876 | for (i = 0; i < 64; i++) |
877 | pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n" , |
878 | i, |
879 | (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i], |
880 | ugeth->p_rx_glbl_pram->addressfiltering[i]); |
881 | pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n" , |
882 | (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam, |
883 | in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); |
884 | } |
885 | if (ugeth->p_send_q_mem_reg) { |
886 | pr_info("Send Q memory registers:\n" ); |
887 | pr_info("Base address: 0x%08x\n" , (u32)ugeth->p_send_q_mem_reg); |
888 | for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) { |
889 | pr_info("SQQD[%d]:\n" , i); |
890 | pr_info("Base address: 0x%08x\n" , |
891 | (u32)&ugeth->p_send_q_mem_reg->sqqd[i]); |
892 | mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], |
893 | sizeof(struct ucc_geth_send_queue_qd)); |
894 | } |
895 | } |
896 | if (ugeth->p_scheduler) { |
897 | pr_info("Scheduler:\n" ); |
898 | pr_info("Base address: 0x%08x\n" , (u32)ugeth->p_scheduler); |
899 | mem_disp((u8 *) ugeth->p_scheduler, |
900 | sizeof(*ugeth->p_scheduler)); |
901 | } |
902 | if (ugeth->p_tx_fw_statistics_pram) { |
903 | pr_info("TX FW statistics pram:\n" ); |
904 | pr_info("Base address: 0x%08x\n" , |
905 | (u32)ugeth->p_tx_fw_statistics_pram); |
906 | mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, |
907 | sizeof(*ugeth->p_tx_fw_statistics_pram)); |
908 | } |
909 | if (ugeth->p_rx_fw_statistics_pram) { |
910 | pr_info("RX FW statistics pram:\n" ); |
911 | pr_info("Base address: 0x%08x\n" , |
912 | (u32)ugeth->p_rx_fw_statistics_pram); |
913 | mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, |
914 | sizeof(*ugeth->p_rx_fw_statistics_pram)); |
915 | } |
916 | if (ugeth->p_rx_irq_coalescing_tbl) { |
917 | pr_info("RX IRQ coalescing tables:\n" ); |
918 | pr_info("Base address: 0x%08x\n" , |
919 | (u32)ugeth->p_rx_irq_coalescing_tbl); |
920 | for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) { |
921 | pr_info("RX IRQ coalescing table entry[%d]:\n" , i); |
922 | pr_info("Base address: 0x%08x\n" , |
923 | (u32)&ugeth->p_rx_irq_coalescing_tbl-> |
924 | coalescingentry[i]); |
925 | pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n" , |
926 | (u32)&ugeth->p_rx_irq_coalescing_tbl-> |
927 | coalescingentry[i].interruptcoalescingmaxvalue, |
928 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> |
929 | coalescingentry[i]. |
930 | interruptcoalescingmaxvalue)); |
931 | pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n" , |
932 | (u32)&ugeth->p_rx_irq_coalescing_tbl-> |
933 | coalescingentry[i].interruptcoalescingcounter, |
934 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> |
935 | coalescingentry[i]. |
936 | interruptcoalescingcounter)); |
937 | } |
938 | } |
939 | if (ugeth->p_rx_bd_qs_tbl) { |
940 | pr_info("RX BD QS tables:\n" ); |
941 | pr_info("Base address: 0x%08x\n" , (u32)ugeth->p_rx_bd_qs_tbl); |
942 | for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) { |
943 | pr_info("RX BD QS table[%d]:\n" , i); |
944 | pr_info("Base address: 0x%08x\n" , |
945 | (u32)&ugeth->p_rx_bd_qs_tbl[i]); |
946 | pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n" , |
947 | (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, |
948 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); |
949 | pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n" , |
950 | (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr, |
951 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); |
952 | pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n" , |
953 | (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, |
954 | in_be32(&ugeth->p_rx_bd_qs_tbl[i]. |
955 | externalbdbaseptr)); |
956 | pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n" , |
957 | (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr, |
958 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); |
959 | pr_info("ucode RX Prefetched BDs:\n" ); |
960 | pr_info("Base address: 0x%08x\n" , |
961 | (u32)qe_muram_addr(in_be32 |
962 | (&ugeth->p_rx_bd_qs_tbl[i]. |
963 | bdbaseptr))); |
964 | mem_disp((u8 *) |
965 | qe_muram_addr(in_be32 |
966 | (&ugeth->p_rx_bd_qs_tbl[i]. |
967 | bdbaseptr)), |
968 | sizeof(struct ucc_geth_rx_prefetched_bds)); |
969 | } |
970 | } |
971 | if (ugeth->p_init_enet_param_shadow) { |
972 | int size; |
973 | pr_info("Init enet param shadow:\n" ); |
974 | pr_info("Base address: 0x%08x\n" , |
975 | (u32) ugeth->p_init_enet_param_shadow); |
976 | mem_disp((u8 *) ugeth->p_init_enet_param_shadow, |
977 | sizeof(*ugeth->p_init_enet_param_shadow)); |
978 | |
979 | size = sizeof(struct ucc_geth_thread_rx_pram); |
980 | if (ugeth->ug_info->rxExtendedFiltering) { |
981 | size += |
982 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; |
983 | if (ugeth->ug_info->largestexternallookupkeysize == |
984 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) |
985 | size += |
986 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; |
987 | if (ugeth->ug_info->largestexternallookupkeysize == |
988 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) |
989 | size += |
990 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; |
991 | } |
992 | |
993 | dump_init_enet_entries(ugeth, |
994 | &(ugeth->p_init_enet_param_shadow-> |
995 | txthread[0]), |
996 | ENET_INIT_PARAM_MAX_ENTRIES_TX, |
997 | sizeof(struct ucc_geth_thread_tx_pram), |
998 | ugeth->ug_info->riscTx, 0); |
999 | dump_init_enet_entries(ugeth, |
1000 | &(ugeth->p_init_enet_param_shadow-> |
1001 | rxthread[0]), |
1002 | ENET_INIT_PARAM_MAX_ENTRIES_RX, size, |
1003 | ugeth->ug_info->riscRx, 1); |
1004 | } |
1005 | } |
1006 | #endif /* DEBUG */ |
1007 | |
1008 | static void init_default_reg_vals(u32 __iomem *upsmr_register, |
1009 | u32 __iomem *maccfg1_register, |
1010 | u32 __iomem *maccfg2_register) |
1011 | { |
1012 | out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); |
1013 | out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); |
1014 | out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); |
1015 | } |
1016 | |
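/* Range-check the half-duplex parameters and assemble the HAFDUP register
 * value (alternate BEB, back pressure, excessive defer, retransmission limit
 * and collision window).
 */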
1017 | static int init_half_duplex_params(int alt_beb, |
1018 | int back_pressure_no_backoff, |
1019 | int no_backoff, |
1020 | int excess_defer, |
1021 | u8 alt_beb_truncation, |
1022 | u8 max_retransmissions, |
1023 | u8 collision_window, |
1024 | u32 __iomem *hafdup_register) |
1025 | { |
1026 | u32 value = 0; |
1027 | |
1028 | if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || |
1029 | (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || |
1030 | (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) |
1031 | return -EINVAL; |
1032 | |
1033 | value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); |
1034 | |
1035 | if (alt_beb) |
1036 | value |= HALFDUP_ALT_BEB; |
1037 | if (back_pressure_no_backoff) |
1038 | value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; |
1039 | if (no_backoff) |
1040 | value |= HALFDUP_NO_BACKOFF; |
1041 | if (excess_defer) |
1042 | value |= HALFDUP_EXCESSIVE_DEFER; |
1043 | |
1044 | value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); |
1045 | |
1046 | value |= collision_window; |
1047 | |
1048 | out_be32(hafdup_register, value); |
1049 | return 0; |
1050 | } |
1051 | |
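/* Pack the four inter-frame-gap fields into the IPGIFG register, rejecting
 * values that exceed the per-field maxima.
 */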
1052 | static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, |
1053 | u8 non_btb_ipg, |
1054 | u8 min_ifg, |
1055 | u8 btb_ipg, |
1056 | u32 __iomem *ipgifg_register) |
1057 | { |
1058 | u32 value = 0; |
1059 | |
1060 | /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back |
1061 | IPG part 2 */ |
1062 | if (non_btb_cs_ipg > non_btb_ipg) |
1063 | return -EINVAL; |
1064 | |
1065 | if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || |
1066 | (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || |
1067 | /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ |
1068 | (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) |
1069 | return -EINVAL; |
1070 | |
1071 | value |= |
1072 | ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & |
1073 | IPGIFG_NBTB_CS_IPG_MASK); |
1074 | value |= |
1075 | ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & |
1076 | IPGIFG_NBTB_IPG_MASK); |
1077 | value |= |
1078 | ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & |
1079 | IPGIFG_MIN_IFG_MASK); |
1080 | value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); |
1081 | |
1082 | out_be32(ipgifg_register, value); |
1083 | return 0; |
1084 | } |
1085 | |
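/* Program pause-frame parameters: pause period and extension field into
 * UEMPR, the automatic flow-control mode into UPSMR, and the Rx/Tx
 * flow-control enables into MACCFG1.
 */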
1086 | int init_flow_control_params(u32 automatic_flow_control_mode, |
1087 | int rx_flow_control_enable, |
1088 | int tx_flow_control_enable, |
1089 | u16 pause_period, |
1090 | u16 extension_field, |
1091 | u32 __iomem *upsmr_register, |
1092 | u32 __iomem *uempr_register, |
1093 | u32 __iomem *maccfg1_register) |
1094 | { |
1095 | u32 value = 0; |
1096 | |
1097 | /* Set UEMPR register */ |
1098 | value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; |
1099 | value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; |
1100 | out_be32(uempr_register, value); |
1101 | |
1102 | /* Set UPSMR register */ |
1103 | setbits32(upsmr_register, automatic_flow_control_mode); |
1104 | |
1105 | value = in_be32(maccfg1_register); |
1106 | if (rx_flow_control_enable) |
1107 | value |= MACCFG1_FLOW_RX; |
1108 | if (tx_flow_control_enable) |
1109 | value |= MACCFG1_FLOW_TX; |
1110 | out_be32(maccfg1_register, value); |
1111 | |
1112 | return 0; |
1113 | } |
1114 | |
1115 | static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, |
1116 | int auto_zero_hardware_statistics, |
1117 | u32 __iomem *upsmr_register, |
1118 | u16 __iomem *uescr_register) |
1119 | { |
1120 | u16 uescr_value = 0; |
1121 | |
1122 | /* Enable hardware statistics gathering if requested */ |
1123 | if (enable_hardware_statistics) |
1124 | setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); |
1125 | |
1126 | /* Clear hardware statistics counters */ |
1127 | uescr_value = in_be16(uescr_register); |
1128 | uescr_value |= UESCR_CLRCNT; |
1129 | /* Automatically zero hardware statistics counters on read, |
1130 | if requested */ |
1131 | if (auto_zero_hardware_statistics) |
1132 | uescr_value |= UESCR_AUTOZ; |
1133 | out_be16(uescr_register, uescr_value); |
1134 | |
1135 | return 0; |
1136 | } |
1137 | |
1138 | static int init_firmware_statistics_gathering_mode(int |
1139 | enable_tx_firmware_statistics, |
1140 | int enable_rx_firmware_statistics, |
1141 | u32 __iomem *tx_rmon_base_ptr, |
1142 | u32 tx_firmware_statistics_structure_address, |
1143 | u32 __iomem *rx_rmon_base_ptr, |
1144 | u32 rx_firmware_statistics_structure_address, |
1145 | u16 __iomem *temoder_register, |
1146 | u32 __iomem *remoder_register) |
1147 | { |
1148 | /* Note: this function does not check if */ |
1149 | /* the parameters it receives are NULL */ |
1150 | |
1151 | if (enable_tx_firmware_statistics) { |
1152 | out_be32(tx_rmon_base_ptr, |
1153 | tx_firmware_statistics_structure_address); |
1154 | setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); |
1155 | } |
1156 | |
1157 | if (enable_rx_firmware_statistics) { |
1158 | out_be32(rx_rmon_base_ptr, |
1159 | rx_firmware_statistics_structure_address); |
1160 | setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); |
1161 | } |
1162 | |
1163 | return 0; |
1164 | } |
1165 | |
1166 | static int init_mac_station_addr_regs(u8 address_byte_0, |
1167 | u8 address_byte_1, |
1168 | u8 address_byte_2, |
1169 | u8 address_byte_3, |
1170 | u8 address_byte_4, |
1171 | u8 address_byte_5, |
1172 | u32 __iomem *macstnaddr1_register, |
1173 | u32 __iomem *macstnaddr2_register) |
1174 | { |
1175 | u32 value = 0; |
1176 | |
1177 | /* Example: for a station address of 0x12345678ABCD, */ |
1178 | /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ |
1179 | |
1180 | /* MACSTNADDR1 Register: */ |
1181 | |
1182 | /* 0 7 8 15 */ |
1183 | /* station address byte 5 station address byte 4 */ |
1184 | /* 16 23 24 31 */ |
1185 | /* station address byte 3 station address byte 2 */ |
1186 | value |= (u32) ((address_byte_2 << 0) & 0x000000FF); |
1187 | value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); |
1188 | value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); |
1189 | value |= (u32) ((address_byte_5 << 24) & 0xFF000000); |
1190 | |
1191 | out_be32(macstnaddr1_register, value); |
1192 | |
1193 | /* MACSTNADDR2 Register: */ |
1194 | |
1195 | /* 0 7 8 15 */ |
1196 | /* station address byte 1 station address byte 0 */ |
1197 | /* 16 23 24 31 */ |
1198 | /* reserved reserved */ |
1199 | value = 0; |
1200 | value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); |
1201 | value |= (u32) ((address_byte_1 << 24) & 0xFF000000); |
1202 | |
1203 | out_be32(macstnaddr2_register, value); |
1204 | |
1205 | return 0; |
1206 | } |
1207 | |
1208 | static int init_check_frame_length_mode(int length_check, |
1209 | u32 __iomem *maccfg2_register) |
1210 | { |
1211 | u32 value = 0; |
1212 | |
1213 | value = in_be32(maccfg2_register); |
1214 | |
1215 | if (length_check) |
1216 | value |= MACCFG2_LC; |
1217 | else |
1218 | value &= ~MACCFG2_LC; |
1219 | |
1220 | out_be32(maccfg2_register, value); |
1221 | return 0; |
1222 | } |
1223 | |
1224 | static int init_preamble_length(u8 preamble_length, |
1225 | u32 __iomem *maccfg2_register) |
1226 | { |
1227 | if ((preamble_length < 3) || (preamble_length > 7)) |
1228 | return -EINVAL; |
1229 | |
1230 | clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, |
1231 | preamble_length << MACCFG2_PREL_SHIFT); |
1232 | |
1233 | return 0; |
1234 | } |
1235 | |
1236 | static int init_rx_parameters(int reject_broadcast, |
1237 | int receive_short_frames, |
1238 | int promiscuous, u32 __iomem *upsmr_register) |
1239 | { |
1240 | u32 value = 0; |
1241 | |
1242 | value = in_be32(upsmr_register); |
1243 | |
1244 | if (reject_broadcast) |
1245 | value |= UCC_GETH_UPSMR_BRO; |
1246 | else |
1247 | value &= ~UCC_GETH_UPSMR_BRO; |
1248 | |
1249 | if (receive_short_frames) |
1250 | value |= UCC_GETH_UPSMR_RSH; |
1251 | else |
1252 | value &= ~UCC_GETH_UPSMR_RSH; |
1253 | |
1254 | if (promiscuous) |
1255 | value |= UCC_GETH_UPSMR_PRO; |
1256 | else |
1257 | value &= ~UCC_GETH_UPSMR_PRO; |
1258 | |
1259 | out_be32(upsmr_register, value); |
1260 | |
1261 | return 0; |
1262 | } |
1263 | |
1264 | static int init_max_rx_buff_len(u16 max_rx_buf_len, |
1265 | u16 __iomem *mrblr_register) |
1266 | { |
1267 | /* max_rx_buf_len value must be a multiple of 128 */ |
1268 | if ((max_rx_buf_len == 0) || |
1269 | (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) |
1270 | return -EINVAL; |
1271 | |
1272 | out_be16(mrblr_register, max_rx_buf_len); |
1273 | return 0; |
1274 | } |
1275 | |
1276 | static int init_min_frame_len(u16 min_frame_length, |
1277 | u16 __iomem *minflr_register, |
1278 | u16 __iomem *mrblr_register) |
1279 | { |
1280 | u16 mrblr_value = 0; |
1281 | |
1282 | mrblr_value = in_be16(mrblr_register); |
1283 | if (min_frame_length >= (mrblr_value - 4)) |
1284 | return -EINVAL; |
1285 | |
1286 | out_be16(minflr_register, min_frame_length); |
1287 | return 0; |
1288 | } |
1289 | |
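/* Program MACCFG2 and UPSMR according to the PHY interface type and maximum
 * speed, disable TBI autonegotiation where applicable, and set up frame
 * length checking and the preamble length.
 */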
1290 | static int adjust_enet_interface(struct ucc_geth_private *ugeth) |
1291 | { |
1292 | struct ucc_geth_info *ug_info; |
1293 | struct ucc_geth __iomem *ug_regs; |
1294 | struct ucc_fast __iomem *uf_regs; |
1295 | int ret_val; |
1296 | u32 upsmr, maccfg2; |
1297 | u16 value; |
1298 | |
1299 | ugeth_vdbg("%s: IN" , __func__); |
1300 | |
1301 | ug_info = ugeth->ug_info; |
1302 | ug_regs = ugeth->ug_regs; |
1303 | uf_regs = ugeth->uccf->uf_regs; |
1304 | |
1305 | /* Set MACCFG2 */ |
1306 | maccfg2 = in_be32(&ug_regs->maccfg2); |
1307 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; |
1308 | if ((ugeth->max_speed == SPEED_10) || |
1309 | (ugeth->max_speed == SPEED_100)) |
1310 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; |
1311 | else if (ugeth->max_speed == SPEED_1000) |
1312 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; |
1313 | maccfg2 |= ug_info->padAndCrc; |
1314 | out_be32(&ug_regs->maccfg2, maccfg2); |
1315 | |
1316 | /* Set UPSMR */ |
1317 | upsmr = in_be32(&uf_regs->upsmr); |
1318 | upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | |
1319 | UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); |
1320 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || |
1321 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || |
1322 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || |
1323 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1324 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || |
1325 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1326 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) |
1327 | upsmr |= UCC_GETH_UPSMR_RPM; |
1328 | switch (ugeth->max_speed) { |
1329 | case SPEED_10: |
1330 | upsmr |= UCC_GETH_UPSMR_R10M; |
1331 | fallthrough; |
1332 | case SPEED_100: |
1333 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) |
1334 | upsmr |= UCC_GETH_UPSMR_RMM; |
1335 | } |
1336 | } |
1337 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || |
1338 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1339 | upsmr |= UCC_GETH_UPSMR_TBIM; |
1340 | } |
1341 | if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) |
1342 | upsmr |= UCC_GETH_UPSMR_SGMM; |
1343 | |
1344 | out_be32(&uf_regs->upsmr, upsmr); |
1345 | |
1346 | /* Disable autonegotiation in tbi mode, because by default it |
1347 | comes up in autonegotiation mode. */ |
1348 | /* Note that this depends on proper setting in utbipar register. */ |
1349 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || |
1350 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1351 | struct ucc_geth_info *ug_info = ugeth->ug_info; |
1352 | struct phy_device *tbiphy; |
1353 | |
1354 | if (!ug_info->tbi_node) |
1355 | pr_warn("TBI mode requires that the device tree specify a tbi-handle\n" ); |
1356 | |
		tbiphy = of_phy_find_device(ug_info->tbi_node);
1358 | if (!tbiphy) |
1359 | pr_warn("Could not get TBI device\n" ); |
1360 | |
		value = phy_read(tbiphy, ENET_TBI_MII_CR);
		value &= ~0x1000;	/* Turn off autonegotiation */
		phy_write(tbiphy, ENET_TBI_MII_CR, value);
1364 | |
		put_device(&tbiphy->mdio.dev);
1366 | } |
1367 | |
	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1369 | |
	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1371 | if (ret_val != 0) { |
1372 | if (netif_msg_probe(ugeth)) |
1373 | pr_err("Preamble length must be between 3 and 7 inclusive\n" ); |
1374 | return ret_val; |
1375 | } |
1376 | |
1377 | return 0; |
1378 | } |
1379 | |
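/* Issue the QE GRACEFUL STOP TX command and poll (up to ~100 ms) for the GRA
 * event that signals the transmitter has stopped.
 */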
1380 | static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) |
1381 | { |
1382 | struct ucc_fast_private *uccf; |
1383 | u32 cecr_subblock; |
1384 | u32 temp; |
1385 | int i = 10; |
1386 | |
1387 | uccf = ugeth->uccf; |
1388 | |
1389 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ |
1390 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); |
1391 | out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ |
1392 | |
1393 | /* Issue host command */ |
1394 | cecr_subblock = |
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
1398 | |
1399 | /* Wait for command to complete */ |
1400 | do { |
		msleep(10);
1402 | temp = in_be32(uccf->p_ucce); |
1403 | } while (!(temp & UCC_GETH_UCCE_GRA) && --i); |
1404 | |
1405 | uccf->stopped_tx = 1; |
1406 | |
1407 | return 0; |
1408 | } |
1409 | |
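/* Issue GRACEFUL STOP RX repeatedly (bounded to ten attempts) until the
 * acknowledge bit in rxgstpack is set, as the spec requires.
 */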
1410 | static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) |
1411 | { |
1412 | struct ucc_fast_private *uccf; |
1413 | u32 cecr_subblock; |
1414 | u8 temp; |
1415 | int i = 10; |
1416 | |
1417 | uccf = ugeth->uccf; |
1418 | |
1419 | /* Clear acknowledge bit */ |
1420 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); |
1421 | temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; |
1422 | out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); |
1423 | |
1424 | /* Keep issuing command and checking acknowledge bit until |
1425 | it is asserted, according to spec */ |
1426 | do { |
1427 | /* Issue host command */ |
1428 | cecr_subblock = |
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);
		msleep(10);
1434 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); |
1435 | } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); |
1436 | |
1437 | uccf->stopped_rx = 1; |
1438 | |
1439 | return 0; |
1440 | } |
1441 | |
1442 | static int ugeth_restart_tx(struct ucc_geth_private *ugeth) |
1443 | { |
1444 | struct ucc_fast_private *uccf; |
1445 | u32 cecr_subblock; |
1446 | |
1447 | uccf = ugeth->uccf; |
1448 | |
1449 | cecr_subblock = |
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
1452 | uccf->stopped_tx = 0; |
1453 | |
1454 | return 0; |
1455 | } |
1456 | |
1457 | static int ugeth_restart_rx(struct ucc_geth_private *ugeth) |
1458 | { |
1459 | struct ucc_fast_private *uccf; |
1460 | u32 cecr_subblock; |
1461 | |
1462 | uccf = ugeth->uccf; |
1463 | |
1464 | cecr_subblock = |
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     0);
1468 | uccf->stopped_rx = 0; |
1469 | |
1470 | return 0; |
1471 | } |
1472 | |
1473 | static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) |
1474 | { |
1475 | struct ucc_fast_private *uccf; |
1476 | int enabled_tx, enabled_rx; |
1477 | |
1478 | uccf = ugeth->uccf; |
1479 | |
1480 | /* check if the UCC number is in range. */ |
1481 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { |
1482 | if (netif_msg_probe(ugeth)) |
1483 | pr_err("ucc_num out of range\n" ); |
1484 | return -EINVAL; |
1485 | } |
1486 | |
1487 | enabled_tx = uccf->enabled_tx; |
1488 | enabled_rx = uccf->enabled_rx; |
1489 | |
1490 | /* Get Tx and Rx going again, in case this channel was actively |
1491 | disabled. */ |
1492 | if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) |
1493 | ugeth_restart_tx(ugeth); |
1494 | if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) |
1495 | ugeth_restart_rx(ugeth); |
1496 | |
1497 | ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ |
1498 | |
1499 | return 0; |
1500 | |
1501 | } |
1502 | |
1503 | static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) |
1504 | { |
1505 | struct ucc_fast_private *uccf; |
1506 | |
1507 | uccf = ugeth->uccf; |
1508 | |
1509 | /* check if the UCC number is in range. */ |
1510 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { |
1511 | if (netif_msg_probe(ugeth)) |
1512 | pr_err("ucc_num out of range\n" ); |
1513 | return -EINVAL; |
1514 | } |
1515 | |
1516 | /* Stop any transmissions */ |
1517 | if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) |
1518 | ugeth_graceful_stop_tx(ugeth); |
1519 | |
1520 | /* Stop any receptions */ |
1521 | if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) |
1522 | ugeth_graceful_stop_rx(ugeth); |
1523 | |
	ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
1525 | |
1526 | return 0; |
1527 | } |
1528 | |
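/*
 * Quiesce the driver before reconfiguring the controller: stop all Tx
 * queues, disable the UCC IRQ so NAPI cannot be rescheduled, and wait for
 * any NAPI poll in progress to finish.  ugeth_activate() reverses this.
 */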
1529 | static void ugeth_quiesce(struct ucc_geth_private *ugeth) |
1530 | { |
	/* Prevent any further xmits */
	netif_tx_stop_all_queues(ugeth->ndev);

	/* Disable the interrupt to avoid NAPI rescheduling. */
	disable_irq(ugeth->ug_info->uf_info.irq);

	/* Stop NAPI, and possibly wait for its completion. */
	napi_disable(&ugeth->napi);
1539 | } |
1540 | |
1541 | static void ugeth_activate(struct ucc_geth_private *ugeth) |
1542 | { |
	napi_enable(&ugeth->napi);
	enable_irq(ugeth->ug_info->uf_info.irq);

	/* allow to xmit again */
	netif_tx_wake_all_queues(ugeth->ndev);
	__netdev_watchdog_up(ugeth->ndev);
1549 | } |
1550 | |
1551 | /* Called every time the controller might need to be made |
1552 | * aware of new link state. The PHY code conveys this |
1553 | * information through variables in the ugeth structure, and this |
1554 | * function converts those variables into the appropriate |
1555 | * register values, and can bring down the device if needed. |
1556 | */ |
1557 | |
1558 | static void adjust_link(struct net_device *dev) |
1559 | { |
1560 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
1561 | struct ucc_geth __iomem *ug_regs; |
1562 | struct ucc_fast __iomem *uf_regs; |
1563 | struct phy_device *phydev = ugeth->phydev; |
1564 | int new_state = 0; |
1565 | |
1566 | ug_regs = ugeth->ug_regs; |
1567 | uf_regs = ugeth->uccf->uf_regs; |
1568 | |
1569 | if (phydev->link) { |
1570 | u32 tempval = in_be32(&ug_regs->maccfg2); |
1571 | u32 upsmr = in_be32(&uf_regs->upsmr); |
1572 | /* Now we make sure that we can be in full duplex mode. |
1573 | * If not, we operate in half-duplex mode. */ |
1574 | if (phydev->duplex != ugeth->oldduplex) { |
1575 | new_state = 1; |
1576 | if (!(phydev->duplex)) |
1577 | tempval &= ~(MACCFG2_FDX); |
1578 | else |
1579 | tempval |= MACCFG2_FDX; |
1580 | ugeth->oldduplex = phydev->duplex; |
1581 | } |
1582 | |
1583 | if (phydev->speed != ugeth->oldspeed) { |
1584 | new_state = 1; |
1585 | switch (phydev->speed) { |
1586 | case SPEED_1000: |
1587 | tempval = ((tempval & |
1588 | ~(MACCFG2_INTERFACE_MODE_MASK)) | |
1589 | MACCFG2_INTERFACE_MODE_BYTE); |
1590 | break; |
1591 | case SPEED_100: |
1592 | case SPEED_10: |
1593 | tempval = ((tempval & |
1594 | ~(MACCFG2_INTERFACE_MODE_MASK)) | |
1595 | MACCFG2_INTERFACE_MODE_NIBBLE); |
1596 | /* if reduced mode, re-set UPSMR.R10M */ |
1597 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || |
1598 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || |
1599 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || |
1600 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1601 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || |
1602 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1603 | if (phydev->speed == SPEED_10) |
1604 | upsmr |= UCC_GETH_UPSMR_R10M; |
1605 | else |
1606 | upsmr &= ~UCC_GETH_UPSMR_R10M; |
1607 | } |
1608 | break; |
1609 | default: |
1610 | if (netif_msg_link(ugeth)) |
				pr_warn(
					"%s: Ack! Speed (%d) is not 10/100/1000!",
					dev->name, phydev->speed);
1614 | break; |
1615 | } |
1616 | ugeth->oldspeed = phydev->speed; |
1617 | } |
1618 | |
1619 | if (!ugeth->oldlink) { |
1620 | new_state = 1; |
1621 | ugeth->oldlink = 1; |
1622 | } |
1623 | |
1624 | if (new_state) { |
1625 | /* |
1626 | * To change the MAC configuration we need to disable |
1627 | * the controller. To do so, we have to either grab |
1628 | * ugeth->lock, which is a bad idea since 'graceful |
1629 | * stop' commands might take quite a while, or we can |
1630 | * quiesce driver's activity. |
1631 | */ |
1632 | ugeth_quiesce(ugeth); |
			ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1634 | |
1635 | out_be32(&ug_regs->maccfg2, tempval); |
1636 | out_be32(&uf_regs->upsmr, upsmr); |
1637 | |
			ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1639 | ugeth_activate(ugeth); |
1640 | } |
1641 | } else if (ugeth->oldlink) { |
1642 | new_state = 1; |
1643 | ugeth->oldlink = 0; |
1644 | ugeth->oldspeed = 0; |
1645 | ugeth->oldduplex = -1; |
1646 | } |
1647 | |
1648 | if (new_state && netif_msg_link(ugeth)) |
1649 | phy_print_status(phydev); |
1650 | } |
1651 | |
1652 | /* Initialize TBI PHY interface for communicating with the |
1653 | * SERDES lynx PHY on the chip. We communicate with this PHY |
1654 | * through the MDIO bus on each controller, treating it as a |
1655 | * "normal" PHY at the address found in the UTBIPA register. We assume |
1656 | * that the UTBIPA register is valid. Either the MDIO bus code will set |
1657 | * it to a value that doesn't conflict with other PHYs on the bus, or the |
1658 | * value doesn't matter, as there are no other PHYs on the bus. |
1659 | */ |
1660 | static void uec_configure_serdes(struct net_device *dev) |
1661 | { |
1662 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
1663 | struct ucc_geth_info *ug_info = ugeth->ug_info; |
1664 | struct phy_device *tbiphy; |
1665 | |
1666 | if (!ug_info->tbi_node) { |
		dev_warn(&dev->dev, "SGMII mode requires that the device "
			 "tree specify a tbi-handle\n");
1669 | return; |
1670 | } |
1671 | |
	tbiphy = of_phy_find_device(ug_info->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
1675 | return; |
1676 | } |
1677 | |
1678 | /* |
1679 | * If the link is already up, we must already be ok, and don't need to |
1680 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1681 | * everything for us? Resetting it takes the link down and requires |
1682 | * several seconds for it to come back. |
1683 | */ |
	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
1686 | return; |
1687 | } |
1688 | |
1689 | /* Single clk mode, mii mode off(for serdes communication) */ |
	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);

	phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);

	put_device(&tbiphy->mdio.dev);
1697 | } |
1698 | |
/* Configure the PHY for dev.
 * Returns 0 on success, a negative errno on failure.
 */
1702 | static int init_phy(struct net_device *dev) |
1703 | { |
1704 | struct ucc_geth_private *priv = netdev_priv(dev); |
1705 | struct ucc_geth_info *ug_info = priv->ug_info; |
1706 | struct phy_device *phydev; |
1707 | |
1708 | priv->oldlink = 0; |
1709 | priv->oldspeed = 0; |
1710 | priv->oldduplex = -1; |
1711 | |
	phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
1716 | return -ENODEV; |
1717 | } |
1718 | |
1719 | if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) |
1720 | uec_configure_serdes(dev); |
1721 | |
	phy_set_max_speed(phydev, priv->max_speed);
1723 | |
1724 | priv->phydev = phydev; |
1725 | |
1726 | return 0; |
1727 | } |
1728 | |
1729 | static void ugeth_dump_regs(struct ucc_geth_private *ugeth) |
1730 | { |
1731 | #ifdef DEBUG |
1732 | ucc_fast_dump_regs(ugeth->uccf); |
1733 | dump_regs(ugeth); |
1734 | dump_bds(ugeth); |
1735 | #endif |
1736 | } |
1737 | |
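/*
 * Clear the 82xx-style group or individual hash filter: zero the hash
 * registers and release every queued address container for that filter,
 * pausing any enabled Rx/Tx traffic around the update.
 */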
1738 | static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * |
1739 | ugeth, |
1740 | enum enet_addr_type |
1741 | enet_addr_type) |
1742 | { |
1743 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
1744 | struct ucc_fast_private *uccf; |
1745 | enum comm_dir comm_dir; |
1746 | struct list_head *p_lh; |
1747 | u16 i, num; |
1748 | u32 __iomem *addr_h; |
1749 | u32 __iomem *addr_l; |
1750 | u8 *p_counter; |
1751 | |
1752 | uccf = ugeth->uccf; |
1753 | |
1754 | p_82xx_addr_filt = |
1755 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) |
1756 | ugeth->p_rx_glbl_pram->addressfiltering; |
1757 | |
1758 | if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { |
1759 | addr_h = &(p_82xx_addr_filt->gaddr_h); |
1760 | addr_l = &(p_82xx_addr_filt->gaddr_l); |
1761 | p_lh = &ugeth->group_hash_q; |
1762 | p_counter = &(ugeth->numGroupAddrInHash); |
1763 | } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { |
1764 | addr_h = &(p_82xx_addr_filt->iaddr_h); |
1765 | addr_l = &(p_82xx_addr_filt->iaddr_l); |
1766 | p_lh = &ugeth->ind_hash_q; |
1767 | p_counter = &(ugeth->numIndAddrInHash); |
1768 | } else |
1769 | return -EINVAL; |
1770 | |
1771 | comm_dir = 0; |
1772 | if (uccf->enabled_tx) |
1773 | comm_dir |= COMM_DIR_TX; |
1774 | if (uccf->enabled_rx) |
1775 | comm_dir |= COMM_DIR_RX; |
1776 | if (comm_dir) |
		ugeth_disable(ugeth, comm_dir);
1778 | |
1779 | /* Clear the hash table. */ |
1780 | out_be32(addr_h, 0x00000000); |
1781 | out_be32(addr_l, 0x00000000); |
1782 | |
1783 | if (!p_lh) |
1784 | return 0; |
1785 | |
1786 | num = *p_counter; |
1787 | |
1788 | /* Delete all remaining CQ elements */ |
1789 | for (i = 0; i < num; i++) |
1790 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); |
1791 | |
1792 | *p_counter = 0; |
1793 | |
1794 | if (comm_dir) |
		ugeth_enable(ugeth, comm_dir);
1796 | |
1797 | return 0; |
1798 | } |
1799 | |
1800 | static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, |
1801 | u8 paddr_num) |
1802 | { |
1803 | ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ |
1804 | return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ |
1805 | } |
1806 | |
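/*
 * Free all Rx resources: unmap and free any skbs still attached to the
 * Rx BDs, then free the per-queue skb pointer arrays and BD rings.
 */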
1807 | static void ucc_geth_free_rx(struct ucc_geth_private *ugeth) |
1808 | { |
1809 | struct ucc_geth_info *ug_info; |
1810 | struct ucc_fast_info *uf_info; |
1811 | u16 i, j; |
1812 | u8 __iomem *bd; |
1813 | |
1814 | |
1815 | ug_info = ugeth->ug_info; |
1816 | uf_info = &ug_info->uf_info; |
1817 | |
	for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
1819 | if (ugeth->p_rx_bd_ring[i]) { |
1820 | /* Return existing data buffers in ring */ |
1821 | bd = ugeth->p_rx_bd_ring[i]; |
1822 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { |
1823 | if (ugeth->rx_skbuff[i][j]) { |
1824 | dma_unmap_single(ugeth->dev, |
1825 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
1826 | ugeth->ug_info-> |
1827 | uf_info.max_rx_buf_length + |
1828 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, |
1829 | DMA_FROM_DEVICE); |
					dev_kfree_skb_any(
						ugeth->rx_skbuff[i][j]);
1832 | ugeth->rx_skbuff[i][j] = NULL; |
1833 | } |
1834 | bd += sizeof(struct qe_bd); |
1835 | } |
1836 | |
			kfree(ugeth->rx_skbuff[i]);

			kfree(ugeth->p_rx_bd_ring[i]);
1840 | ugeth->p_rx_bd_ring[i] = NULL; |
1841 | } |
1842 | } |
1843 | |
1844 | } |
1845 | |
1846 | static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) |
1847 | { |
1848 | struct ucc_geth_info *ug_info; |
1849 | struct ucc_fast_info *uf_info; |
1850 | u16 i, j; |
1851 | u8 __iomem *bd; |
1852 | |
	netdev_reset_queue(ugeth->ndev);
1854 | |
1855 | ug_info = ugeth->ug_info; |
1856 | uf_info = &ug_info->uf_info; |
1857 | |
	for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
1859 | bd = ugeth->p_tx_bd_ring[i]; |
1860 | if (!bd) |
1861 | continue; |
1862 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { |
1863 | if (ugeth->tx_skbuff[i][j]) { |
1864 | dma_unmap_single(ugeth->dev, |
1865 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
1866 | (in_be32((u32 __iomem *)bd) & |
1867 | BD_LENGTH_MASK), |
1868 | DMA_TO_DEVICE); |
				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
1870 | ugeth->tx_skbuff[i][j] = NULL; |
1871 | } |
1872 | } |
1873 | |
		kfree(ugeth->tx_skbuff[i]);

		kfree(ugeth->p_tx_bd_ring[i]);
1877 | ugeth->p_tx_bd_ring[i] = NULL; |
1878 | } |
1879 | |
1880 | } |
1881 | |
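/*
 * Undo ucc_struct_init()/ucc_geth_startup(): free the fast UCC, all MURAM
 * parameter RAM areas, the shadow InitEnet structure, the BD rings and skb
 * arrays, the hash filter queues and the ioremapped register window.
 */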
1882 | static void ucc_geth_memclean(struct ucc_geth_private *ugeth) |
1883 | { |
1884 | if (!ugeth) |
1885 | return; |
1886 | |
1887 | if (ugeth->uccf) { |
		ucc_fast_free(ugeth->uccf);
1889 | ugeth->uccf = NULL; |
1890 | } |
1891 | |
	qe_muram_free_addr(ugeth->p_thread_data_tx);
	ugeth->p_thread_data_tx = NULL;

	qe_muram_free_addr(ugeth->p_thread_data_rx);
	ugeth->p_thread_data_rx = NULL;

	qe_muram_free_addr(ugeth->p_exf_glbl_param);
	ugeth->p_exf_glbl_param = NULL;

	qe_muram_free_addr(ugeth->p_rx_glbl_pram);
	ugeth->p_rx_glbl_pram = NULL;

	qe_muram_free_addr(ugeth->p_tx_glbl_pram);
	ugeth->p_tx_glbl_pram = NULL;

	qe_muram_free_addr(ugeth->p_send_q_mem_reg);
	ugeth->p_send_q_mem_reg = NULL;

	qe_muram_free_addr(ugeth->p_scheduler);
	ugeth->p_scheduler = NULL;

	qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
	ugeth->p_tx_fw_statistics_pram = NULL;

	qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
	ugeth->p_rx_fw_statistics_pram = NULL;

	qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
	ugeth->p_rx_irq_coalescing_tbl = NULL;

	qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
1923 | ugeth->p_rx_bd_qs_tbl = NULL; |
1924 | |
1925 | if (ugeth->p_init_enet_param_shadow) { |
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   rxthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_RX,
					 ugeth->ug_info->riscRx, 1);
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   txthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_TX,
					 ugeth->ug_info->riscTx, 0);
		kfree(ugeth->p_init_enet_param_shadow);
1937 | ugeth->p_init_enet_param_shadow = NULL; |
1938 | } |
1939 | ucc_geth_free_tx(ugeth); |
1940 | ucc_geth_free_rx(ugeth); |
	while (!list_empty(&ugeth->group_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->group_hash_q)));
	while (!list_empty(&ugeth->ind_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->ind_hash_q)));
	if (ugeth->ug_regs) {
		iounmap(ugeth->ug_regs);
1949 | ugeth->ug_regs = NULL; |
1950 | } |
1951 | } |
1952 | |
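/*
 * Update receive filtering for dev: set or clear promiscuous mode in UPSMR
 * and rebuild the 82xx-style group hash filter from the multicast list
 * (or open it completely for IFF_ALLMULTI).
 */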
1953 | static void ucc_geth_set_multi(struct net_device *dev) |
1954 | { |
1955 | struct ucc_geth_private *ugeth; |
1956 | struct netdev_hw_addr *ha; |
1957 | struct ucc_fast __iomem *uf_regs; |
1958 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
1959 | |
1960 | ugeth = netdev_priv(dev); |
1961 | |
1962 | uf_regs = ugeth->uccf->uf_regs; |
1963 | |
1964 | if (dev->flags & IFF_PROMISC) { |
1965 | setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
1966 | } else { |
1967 | clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
1968 | |
1969 | p_82xx_addr_filt = |
1970 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
1971 | p_rx_glbl_pram->addressfiltering; |
1972 | |
1973 | if (dev->flags & IFF_ALLMULTI) { |
1974 | /* Catch all multicast addresses, so set the |
1975 | * filter to all 1's. |
1976 | */ |
1977 | out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); |
1978 | out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); |
1979 | } else { |
1980 | /* Clear filter and add the addresses in the list. |
1981 | */ |
1982 | out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); |
1983 | out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); |
1984 | |
1985 | netdev_for_each_mc_addr(ha, dev) { |
1986 | /* Ask CPM to run CRC and set bit in |
1987 | * filter mask. |
1988 | */ |
				hw_add_addr_in_hash(ugeth, ha->addr);
1990 | } |
1991 | } |
1992 | } |
1993 | } |
1994 | |
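/*
 * Bring the controller down: stop the PHY first so the link-down is seen
 * before the controller is disabled, gracefully stop Rx/Tx, mask and clear
 * all UCC interrupts, disable the MAC and release all resources.
 */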
1995 | static void ucc_geth_stop(struct ucc_geth_private *ugeth) |
1996 | { |
1997 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; |
1998 | struct phy_device *phydev = ugeth->phydev; |
1999 | |
2000 | ugeth_vdbg("%s: IN" , __func__); |
2001 | |
2002 | /* |
2003 | * Tell the kernel the link is down. |
2004 | * Must be done before disabling the controller |
2005 | * or deadlock may happen. |
2006 | */ |
2007 | phy_stop(phydev); |
2008 | |
2009 | /* Disable the controller */ |
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2011 | |
2012 | /* Mask all interrupts */ |
2013 | out_be32(ugeth->uccf->p_uccm, 0x00000000); |
2014 | |
2015 | /* Clear all interrupts */ |
2016 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); |
2017 | |
2018 | /* Disable Rx and Tx */ |
2019 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
2020 | |
2021 | ucc_geth_memclean(ugeth); |
2022 | } |
2023 | |
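/*
 * Validate the static configuration (BD ring lengths, max Rx buffer
 * length, queue counts, priority tables), build the UCCE event mask,
 * initialize the fast UCC block and map the UCC Ethernet registers.
 */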
2024 | static int ucc_struct_init(struct ucc_geth_private *ugeth) |
2025 | { |
2026 | struct ucc_geth_info *ug_info; |
2027 | struct ucc_fast_info *uf_info; |
2028 | int i; |
2029 | |
2030 | ug_info = ugeth->ug_info; |
2031 | uf_info = &ug_info->uf_info; |
2032 | |
2033 | /* Rx BD lengths */ |
	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
2035 | if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || |
2036 | (ug_info->bdRingLenRx[i] % |
2037 | UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { |
2038 | if (netif_msg_probe(ugeth)) |
2039 | pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n" ); |
2040 | return -EINVAL; |
2041 | } |
2042 | } |
2043 | |
2044 | /* Tx BD lengths */ |
	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
2046 | if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { |
2047 | if (netif_msg_probe(ugeth)) |
2048 | pr_err("Tx BD ring length must be no smaller than 2\n" ); |
2049 | return -EINVAL; |
2050 | } |
2051 | } |
2052 | |
2053 | /* mrblr */ |
2054 | if ((uf_info->max_rx_buf_length == 0) || |
2055 | (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { |
2056 | if (netif_msg_probe(ugeth)) |
2057 | pr_err("max_rx_buf_length must be non-zero multiple of 128\n" ); |
2058 | return -EINVAL; |
2059 | } |
2060 | |
2061 | /* num Tx queues */ |
	if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
2063 | if (netif_msg_probe(ugeth)) |
2064 | pr_err("number of tx queues too large\n" ); |
2065 | return -EINVAL; |
2066 | } |
2067 | |
2068 | /* num Rx queues */ |
	if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
2070 | if (netif_msg_probe(ugeth)) |
2071 | pr_err("number of rx queues too large\n" ); |
2072 | return -EINVAL; |
2073 | } |
2074 | |
2075 | /* l2qt */ |
2076 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { |
		if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
2078 | if (netif_msg_probe(ugeth)) |
2079 | pr_err("VLAN priority table entry must not be larger than number of Rx queues\n" ); |
2080 | return -EINVAL; |
2081 | } |
2082 | } |
2083 | |
2084 | /* l3qt */ |
2085 | for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { |
		if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
2087 | if (netif_msg_probe(ugeth)) |
2088 | pr_err("IP priority table entry must not be larger than number of Rx queues\n" ); |
2089 | return -EINVAL; |
2090 | } |
2091 | } |
2092 | |
2093 | if (ug_info->cam && !ug_info->ecamptr) { |
2094 | if (netif_msg_probe(ugeth)) |
2095 | pr_err("If cam mode is chosen, must supply cam ptr\n" ); |
2096 | return -EINVAL; |
2097 | } |
2098 | |
2099 | if ((ug_info->numStationAddresses != |
2100 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && |
2101 | ug_info->rxExtendedFiltering) { |
2102 | if (netif_msg_probe(ugeth)) |
2103 | pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n" ); |
2104 | return -EINVAL; |
2105 | } |
2106 | |
2107 | /* Generate uccm_mask for receive */ |
2108 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ |
	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
2110 | uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); |
2111 | |
	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
2113 | uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); |
2114 | /* Initialize the general fast UCC block. */ |
	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2116 | if (netif_msg_probe(ugeth)) |
2117 | pr_err("Failed to init uccf\n" ); |
2118 | return -ENOMEM; |
2119 | } |
2120 | |
2121 | /* read the number of risc engines, update the riscTx and riscRx |
2122 | * if there are 4 riscs in QE |
2123 | */ |
2124 | if (qe_get_num_of_risc() == 4) { |
2125 | ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; |
2126 | ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; |
2127 | } |
2128 | |
	ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
2130 | if (!ugeth->ug_regs) { |
2131 | if (netif_msg_probe(ugeth)) |
2132 | pr_err("Failed to ioremap regs\n" ); |
2133 | return -ENOMEM; |
2134 | } |
2135 | |
2136 | return 0; |
2137 | } |
2138 | |
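/*
 * Allocate and initialize the Tx BD rings and the matching skb pointer
 * arrays; every BD is cleared and the last BD of each ring gets the Wrap
 * bit so the ring closes on itself.
 */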
2139 | static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) |
2140 | { |
2141 | struct ucc_geth_info *ug_info; |
2142 | struct ucc_fast_info *uf_info; |
2143 | int length; |
2144 | u16 i, j; |
2145 | u8 __iomem *bd; |
2146 | |
2147 | ug_info = ugeth->ug_info; |
2148 | uf_info = &ug_info->uf_info; |
2149 | |
2150 | /* Allocate Tx bds */ |
	for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
2152 | u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT, |
2153 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT); |
2154 | u32 alloc; |
2155 | |
2156 | length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd); |
2157 | alloc = round_up(length, align); |
2158 | alloc = roundup_pow_of_two(alloc); |
2159 | |
		ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
2161 | |
2162 | if (!ugeth->p_tx_bd_ring[j]) { |
2163 | if (netif_msg_ifup(ugeth)) |
2164 | pr_err("Can not allocate memory for Tx bd rings\n" ); |
2165 | return -ENOMEM; |
2166 | } |
2167 | /* Zero unused end of bd ring, according to spec */ |
2168 | memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length); |
2169 | } |
2170 | |
2171 | /* Init Tx bds */ |
	for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
2173 | /* Setup the skbuff rings */ |
2174 | ugeth->tx_skbuff[j] = |
			kcalloc(ugeth->ug_info->bdRingLenTx[j],
				sizeof(struct sk_buff *), GFP_KERNEL);
2177 | |
2178 | if (ugeth->tx_skbuff[j] == NULL) { |
2179 | if (netif_msg_ifup(ugeth)) |
2180 | pr_err("Could not allocate tx_skbuff\n" ); |
2181 | return -ENOMEM; |
2182 | } |
2183 | |
2184 | ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; |
2185 | bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; |
2186 | for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { |
2187 | /* clear bd buffer */ |
2188 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); |
2189 | /* set bd status and length */ |
2190 | out_be32((u32 __iomem *)bd, 0); |
2191 | bd += sizeof(struct qe_bd); |
2192 | } |
2193 | bd -= sizeof(struct qe_bd); |
2194 | /* set bd status and length */ |
2195 | out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ |
2196 | } |
2197 | |
2198 | return 0; |
2199 | } |
2200 | |
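/*
 * Allocate and initialize the Rx BD rings and skb pointer arrays; the
 * actual receive buffers are attached later via rx_bd_buffer_set().
 */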
2201 | static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth) |
2202 | { |
2203 | struct ucc_geth_info *ug_info; |
2204 | struct ucc_fast_info *uf_info; |
2205 | int length; |
2206 | u16 i, j; |
2207 | u8 __iomem *bd; |
2208 | |
2209 | ug_info = ugeth->ug_info; |
2210 | uf_info = &ug_info->uf_info; |
2211 | |
2212 | /* Allocate Rx bds */ |
	for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
2214 | u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT; |
2215 | u32 alloc; |
2216 | |
2217 | length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); |
2218 | alloc = round_up(length, align); |
2219 | alloc = roundup_pow_of_two(alloc); |
2220 | |
		ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
2222 | if (!ugeth->p_rx_bd_ring[j]) { |
2223 | if (netif_msg_ifup(ugeth)) |
2224 | pr_err("Can not allocate memory for Rx bd rings\n" ); |
2225 | return -ENOMEM; |
2226 | } |
2227 | } |
2228 | |
2229 | /* Init Rx bds */ |
	for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
2231 | /* Setup the skbuff rings */ |
2232 | ugeth->rx_skbuff[j] = |
			kcalloc(ugeth->ug_info->bdRingLenRx[j],
				sizeof(struct sk_buff *), GFP_KERNEL);
2235 | |
2236 | if (ugeth->rx_skbuff[j] == NULL) { |
2237 | if (netif_msg_ifup(ugeth)) |
2238 | pr_err("Could not allocate rx_skbuff\n" ); |
2239 | return -ENOMEM; |
2240 | } |
2241 | |
2242 | ugeth->skb_currx[j] = 0; |
2243 | bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; |
2244 | for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { |
2245 | /* set bd status and length */ |
2246 | out_be32((u32 __iomem *)bd, R_I); |
2247 | /* clear bd buffer */ |
2248 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); |
2249 | bd += sizeof(struct qe_bd); |
2250 | } |
2251 | bd -= sizeof(struct qe_bd); |
2252 | /* set bd status and length */ |
2253 | out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ |
2254 | } |
2255 | |
2256 | return 0; |
2257 | } |
2258 | |
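/*
 * One-time hardware bring-up: program the MAC registers, allocate the BD
 * rings and the Tx/Rx global parameter RAM in MURAM, fill the shadow
 * InitEnet structure and finally issue the QE_INIT_TX_RX host command.
 */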
2259 | static int ucc_geth_startup(struct ucc_geth_private *ugeth) |
2260 | { |
2261 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
2262 | struct ucc_geth_init_pram __iomem *p_init_enet_pram; |
2263 | struct ucc_fast_private *uccf; |
2264 | struct ucc_geth_info *ug_info; |
2265 | struct ucc_fast_info *uf_info; |
2266 | struct ucc_fast __iomem *uf_regs; |
2267 | struct ucc_geth __iomem *ug_regs; |
2268 | int ret_val = -EINVAL; |
2269 | u32 remoder = UCC_GETH_REMODER_INIT; |
2270 | u32 init_enet_pram_offset, cecr_subblock, command; |
2271 | u32 ifstat, i, j, size, l2qt, l3qt; |
2272 | u16 temoder = UCC_GETH_TEMODER_INIT; |
2273 | u8 function_code = 0; |
2274 | u8 __iomem *endOfRing; |
2275 | u8 numThreadsRxNumerical, numThreadsTxNumerical; |
2276 | s32 rx_glbl_pram_offset, tx_glbl_pram_offset; |
2277 | |
2278 | ugeth_vdbg("%s: IN" , __func__); |
2279 | uccf = ugeth->uccf; |
2280 | ug_info = ugeth->ug_info; |
2281 | uf_info = &ug_info->uf_info; |
2282 | uf_regs = uccf->uf_regs; |
2283 | ug_regs = ugeth->ug_regs; |
2284 | |
	numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
2286 | if (!numThreadsRxNumerical) { |
2287 | if (netif_msg_ifup(ugeth)) |
2288 | pr_err("Bad number of Rx threads value\n" ); |
2289 | return -EINVAL; |
2290 | } |
2291 | |
	numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
2293 | if (!numThreadsTxNumerical) { |
2294 | if (netif_msg_ifup(ugeth)) |
2295 | pr_err("Bad number of Tx threads value\n" ); |
2296 | return -EINVAL; |
2297 | } |
2298 | |
2299 | /* Calculate rx_extended_features */ |
2300 | ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || |
2301 | ug_info->ipAddressAlignment || |
2302 | (ug_info->numStationAddresses != |
2303 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1); |
2304 | |
2305 | ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || |
2306 | (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || |
2307 | (ug_info->vlanOperationNonTagged != |
2308 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); |
2309 | |
	init_default_reg_vals(&uf_regs->upsmr,
			      &ug_regs->maccfg1, &ug_regs->maccfg2);
2312 | |
2313 | /* Set UPSMR */ |
2314 | /* For more details see the hardware spec. */ |
	init_rx_parameters(ug_info->bro,
			   ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2317 | |
2318 | /* We're going to ignore other registers for now, */ |
2319 | /* except as needed to get up and running */ |
2320 | |
2321 | /* Set MACCFG1 */ |
2322 | /* For more details see the hardware spec. */ |
	init_flow_control_params(ug_info->aufc,
				 ug_info->receiveFlowControl,
				 ug_info->transmitFlowControl,
				 ug_info->pausePeriod,
				 ug_info->extensionField,
				 &uf_regs->upsmr,
				 &ug_regs->uempr, &ug_regs->maccfg1);
2330 | |
2331 | setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
2332 | |
2333 | /* Set IPGIFG */ |
2334 | /* For more details see the hardware spec. */ |
	ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
					      ug_info->nonBackToBackIfgPart2,
					      ug_info->
					      miminumInterFrameGapEnforcement,
					      ug_info->backToBackInterFrameGap,
					      &ug_regs->ipgifg);
2341 | if (ret_val != 0) { |
2342 | if (netif_msg_ifup(ugeth)) |
2343 | pr_err("IPGIFG initialization parameter too large\n" ); |
2344 | return ret_val; |
2345 | } |
2346 | |
2347 | /* Set HAFDUP */ |
2348 | /* For more details see the hardware spec. */ |
	ret_val = init_half_duplex_params(ug_info->altBeb,
					  ug_info->backPressureNoBackoff,
					  ug_info->noBackoff,
					  ug_info->excessDefer,
					  ug_info->altBebTruncation,
					  ug_info->maxRetransmission,
					  ug_info->collisionWindow,
					  &ug_regs->hafdup);
2357 | if (ret_val != 0) { |
2358 | if (netif_msg_ifup(ugeth)) |
2359 | pr_err("Half Duplex initialization parameter too large\n" ); |
2360 | return ret_val; |
2361 | } |
2362 | |
2363 | /* Set IFSTAT */ |
2364 | /* For more details see the hardware spec. */ |
2365 | /* Read only - resets upon read */ |
2366 | ifstat = in_be32(&ug_regs->ifstat); |
2367 | |
2368 | /* Clear UEMPR */ |
2369 | /* For more details see the hardware spec. */ |
2370 | out_be32(&ug_regs->uempr, 0); |
2371 | |
2372 | /* Set UESCR */ |
2373 | /* For more details see the hardware spec. */ |
	init_hw_statistics_gathering_mode((ug_info->statisticsMode &
					   UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
					  0, &uf_regs->upsmr, &ug_regs->uescr);
2377 | |
2378 | ret_val = ucc_geth_alloc_tx(ugeth); |
2379 | if (ret_val != 0) |
2380 | return ret_val; |
2381 | |
2382 | ret_val = ucc_geth_alloc_rx(ugeth); |
2383 | if (ret_val != 0) |
2384 | return ret_val; |
2385 | |
2386 | /* |
2387 | * Global PRAM |
2388 | */ |
2389 | /* Tx global PRAM */ |
2390 | /* Allocate global tx parameter RAM page */ |
2391 | tx_glbl_pram_offset = |
	    qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2393 | UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); |
2394 | if (tx_glbl_pram_offset < 0) { |
2395 | if (netif_msg_ifup(ugeth)) |
2396 | pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n" ); |
2397 | return -ENOMEM; |
2398 | } |
	ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
2400 | /* Fill global PRAM */ |
2401 | |
2402 | /* TQPTR */ |
2403 | /* Size varies with number of Tx threads */ |
2404 | ugeth->thread_dat_tx_offset = |
	    qe_muram_alloc(numThreadsTxNumerical *
2406 | sizeof(struct ucc_geth_thread_data_tx) + |
2407 | 32 * (numThreadsTxNumerical == 1), |
2408 | UCC_GETH_THREAD_DATA_ALIGNMENT); |
2409 | if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { |
2410 | if (netif_msg_ifup(ugeth)) |
2411 | pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n" ); |
2412 | return -ENOMEM; |
2413 | } |
2414 | |
2415 | ugeth->p_thread_data_tx = |
	    (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2417 | thread_dat_tx_offset); |
2418 | out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); |
2419 | |
2420 | /* vtagtable */ |
2421 | for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) |
2422 | out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], |
2423 | ug_info->vtagtable[i]); |
2424 | |
2425 | /* iphoffset */ |
2426 | for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) |
2427 | out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], |
2428 | ug_info->iphoffset[i]); |
2429 | |
2430 | /* SQPTR */ |
2431 | /* Size varies with number of Tx queues */ |
2432 | ugeth->send_q_mem_reg_offset = |
	    qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
2434 | sizeof(struct ucc_geth_send_queue_qd), |
2435 | UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); |
2436 | if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { |
2437 | if (netif_msg_ifup(ugeth)) |
2438 | pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n" ); |
2439 | return -ENOMEM; |
2440 | } |
2441 | |
2442 | ugeth->p_send_q_mem_reg = |
	    (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2444 | send_q_mem_reg_offset); |
2445 | out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); |
2446 | |
2447 | /* Setup the table */ |
2448 | /* Assume BD rings are already established */ |
	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
2450 | endOfRing = |
2451 | ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - |
2452 | 1) * sizeof(struct qe_bd); |
2453 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, |
			 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2455 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. |
2456 | last_bd_completed_address, |
			 (u32) virt_to_phys(endOfRing));
2458 | } |
2459 | |
2460 | /* schedulerbasepointer */ |
2461 | |
	if (ucc_geth_tx_queues(ug_info) > 1) {
2463 | /* scheduler exists only if more than 1 tx queue */ |
2464 | ugeth->scheduler_offset = |
		    qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
2466 | UCC_GETH_SCHEDULER_ALIGNMENT); |
2467 | if (IS_ERR_VALUE(ugeth->scheduler_offset)) { |
2468 | if (netif_msg_ifup(ugeth)) |
2469 | pr_err("Can not allocate DPRAM memory for p_scheduler\n" ); |
2470 | return -ENOMEM; |
2471 | } |
2472 | |
2473 | ugeth->p_scheduler = |
		    (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2475 | scheduler_offset); |
2476 | out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, |
2477 | ugeth->scheduler_offset); |
2478 | |
2479 | /* Set values in scheduler */ |
2480 | out_be32(&ugeth->p_scheduler->mblinterval, |
2481 | ug_info->mblinterval); |
2482 | out_be16(&ugeth->p_scheduler->nortsrbytetime, |
2483 | ug_info->nortsrbytetime); |
2484 | out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); |
2485 | out_8(&ugeth->p_scheduler->strictpriorityq, |
2486 | ug_info->strictpriorityq); |
2487 | out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); |
2488 | out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); |
2489 | for (i = 0; i < NUM_TX_QUEUES; i++) |
2490 | out_8(&ugeth->p_scheduler->weightfactor[i], |
2491 | ug_info->weightfactor[i]); |
2492 | |
2493 | /* Set pointers to cpucount registers in scheduler */ |
2494 | ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); |
2495 | ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); |
2496 | ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); |
2497 | ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); |
2498 | ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); |
2499 | ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); |
2500 | ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); |
2501 | ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); |
2502 | } |
2503 | |
2504 | /* schedulerbasepointer */ |
2505 | /* TxRMON_PTR (statistics) */ |
2506 | if (ug_info-> |
2507 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { |
2508 | ugeth->tx_fw_statistics_pram_offset = |
		    qe_muram_alloc(sizeof
2510 | (struct ucc_geth_tx_firmware_statistics_pram), |
2511 | UCC_GETH_TX_STATISTICS_ALIGNMENT); |
2512 | if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { |
2513 | if (netif_msg_ifup(ugeth)) |
2514 | pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n" ); |
2515 | return -ENOMEM; |
2516 | } |
2517 | ugeth->p_tx_fw_statistics_pram = |
2518 | (struct ucc_geth_tx_firmware_statistics_pram __iomem *) |
		    qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2520 | } |
2521 | |
2522 | /* temoder */ |
2523 | /* Already has speed set */ |
2524 | |
	if (ucc_geth_tx_queues(ug_info) > 1)
2526 | temoder |= TEMODER_SCHEDULER_ENABLE; |
2527 | if (ug_info->ipCheckSumGenerate) |
2528 | temoder |= TEMODER_IP_CHECKSUM_GENERATE; |
	temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
2530 | out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); |
2531 | |
2532 | /* Function code register value to be used later */ |
2533 | function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; |
2534 | /* Required for QE */ |
2535 | |
2536 | /* function code register */ |
2537 | out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); |
2538 | |
2539 | /* Rx global PRAM */ |
2540 | /* Allocate global rx parameter RAM page */ |
2541 | rx_glbl_pram_offset = |
	    qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
2543 | UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); |
2544 | if (rx_glbl_pram_offset < 0) { |
2545 | if (netif_msg_ifup(ugeth)) |
2546 | pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n" ); |
2547 | return -ENOMEM; |
2548 | } |
	ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
2550 | /* Fill global PRAM */ |
2551 | |
2552 | /* RQPTR */ |
2553 | /* Size varies with number of Rx threads */ |
2554 | ugeth->thread_dat_rx_offset = |
	    qe_muram_alloc(numThreadsRxNumerical *
2556 | sizeof(struct ucc_geth_thread_data_rx), |
2557 | UCC_GETH_THREAD_DATA_ALIGNMENT); |
2558 | if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { |
2559 | if (netif_msg_ifup(ugeth)) |
2560 | pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n" ); |
2561 | return -ENOMEM; |
2562 | } |
2563 | |
2564 | ugeth->p_thread_data_rx = |
	    (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2566 | thread_dat_rx_offset); |
2567 | out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); |
2568 | |
2569 | /* typeorlen */ |
2570 | out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); |
2571 | |
2572 | /* rxrmonbaseptr (statistics) */ |
2573 | if (ug_info-> |
2574 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { |
2575 | ugeth->rx_fw_statistics_pram_offset = |
		    qe_muram_alloc(sizeof
2577 | (struct ucc_geth_rx_firmware_statistics_pram), |
2578 | UCC_GETH_RX_STATISTICS_ALIGNMENT); |
2579 | if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { |
2580 | if (netif_msg_ifup(ugeth)) |
2581 | pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n" ); |
2582 | return -ENOMEM; |
2583 | } |
2584 | ugeth->p_rx_fw_statistics_pram = |
2585 | (struct ucc_geth_rx_firmware_statistics_pram __iomem *) |
		    qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2587 | } |
2588 | |
2589 | /* intCoalescingPtr */ |
2590 | |
2591 | /* Size varies with number of Rx queues */ |
2592 | ugeth->rx_irq_coalescing_tbl_offset = |
	    qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
2594 | sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) |
2595 | + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); |
2596 | if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { |
2597 | if (netif_msg_ifup(ugeth)) |
2598 | pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n" ); |
2599 | return -ENOMEM; |
2600 | } |
2601 | |
2602 | ugeth->p_rx_irq_coalescing_tbl = |
2603 | (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) |
	    qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
2605 | out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, |
2606 | ugeth->rx_irq_coalescing_tbl_offset); |
2607 | |
2608 | /* Fill interrupt coalescing table */ |
	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
2610 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. |
2611 | interruptcoalescingmaxvalue, |
2612 | ug_info->interruptcoalescingmaxvalue[i]); |
2613 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. |
2614 | interruptcoalescingcounter, |
2615 | ug_info->interruptcoalescingmaxvalue[i]); |
2616 | } |
2617 | |
2618 | /* MRBLR */ |
	init_max_rx_buff_len(uf_info->max_rx_buf_length,
			     &ugeth->p_rx_glbl_pram->mrblr);
2621 | /* MFLR */ |
2622 | out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); |
2623 | /* MINFLR */ |
	init_min_frame_len(ug_info->minFrameLength,
			   &ugeth->p_rx_glbl_pram->minflr,
			   &ugeth->p_rx_glbl_pram->mrblr);
2627 | /* MAXD1 */ |
2628 | out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); |
2629 | /* MAXD2 */ |
2630 | out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); |
2631 | |
2632 | /* l2qt */ |
2633 | l2qt = 0; |
2634 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) |
2635 | l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); |
2636 | out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); |
2637 | |
2638 | /* l3qt */ |
2639 | for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { |
2640 | l3qt = 0; |
2641 | for (i = 0; i < 8; i++) |
2642 | l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); |
2643 | out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); |
2644 | } |
2645 | |
2646 | /* vlantype */ |
2647 | out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); |
2648 | |
2649 | /* vlantci */ |
2650 | out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); |
2651 | |
2652 | /* ecamptr */ |
2653 | out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); |
2654 | |
2655 | /* RBDQPTR */ |
2656 | /* Size varies with number of Rx queues */ |
2657 | ugeth->rx_bd_qs_tbl_offset = |
	    qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
2659 | (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2660 | sizeof(struct ucc_geth_rx_prefetched_bds)), |
2661 | UCC_GETH_RX_BD_QUEUES_ALIGNMENT); |
2662 | if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { |
2663 | if (netif_msg_ifup(ugeth)) |
2664 | pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n" ); |
2665 | return -ENOMEM; |
2666 | } |
2667 | |
2668 | ugeth->p_rx_bd_qs_tbl = |
	    (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
2670 | rx_bd_qs_tbl_offset); |
2671 | out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); |
2672 | |
2673 | /* Setup the table */ |
2674 | /* Assume BD rings are already established */ |
	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
2676 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, |
			 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
2678 | /* rest of fields handled by QE */ |
2679 | } |
2680 | |
2681 | /* remoder */ |
2682 | /* Already has speed set */ |
2683 | |
2684 | if (ugeth->rx_extended_features) |
2685 | remoder |= REMODER_RX_EXTENDED_FEATURES; |
2686 | if (ug_info->rxExtendedFiltering) |
2687 | remoder |= REMODER_RX_EXTENDED_FILTERING; |
2688 | if (ug_info->dynamicMaxFrameLength) |
2689 | remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; |
2690 | if (ug_info->dynamicMinFrameLength) |
2691 | remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; |
2692 | remoder |= |
2693 | ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; |
2694 | remoder |= |
2695 | ug_info-> |
2696 | vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; |
2697 | remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; |
	remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
2699 | if (ug_info->ipCheckSumCheck) |
2700 | remoder |= REMODER_IP_CHECKSUM_CHECK; |
2701 | if (ug_info->ipAddressAlignment) |
2702 | remoder |= REMODER_IP_ADDRESS_ALIGNMENT; |
2703 | out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); |
2704 | |
	/* Note that this function must be called */
	/* ONLY AFTER p_tx_fw_statistics_pram */
	/* and p_rx_fw_statistics_pram are allocated ! */
	init_firmware_statistics_gathering_mode((ug_info->
						 statisticsMode &
						 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
						(ug_info->statisticsMode &
						 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
						&ugeth->p_tx_glbl_pram->txrmonbaseptr,
						ugeth->tx_fw_statistics_pram_offset,
						&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
						ugeth->rx_fw_statistics_pram_offset,
						&ugeth->p_tx_glbl_pram->temoder,
						&ugeth->p_rx_glbl_pram->remoder);
2719 | |
2720 | /* function code register */ |
2721 | out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); |
2722 | |
2723 | /* initialize extended filtering */ |
2724 | if (ug_info->rxExtendedFiltering) { |
2725 | if (!ug_info->extendedFilteringChainPointer) { |
2726 | if (netif_msg_ifup(ugeth)) |
2727 | pr_err("Null Extended Filtering Chain Pointer\n" ); |
2728 | return -EINVAL; |
2729 | } |
2730 | |
2731 | /* Allocate memory for extended filtering Mode Global |
2732 | Parameters */ |
2733 | ugeth->exf_glbl_param_offset = |
		    qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
2735 | UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); |
2736 | if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { |
2737 | if (netif_msg_ifup(ugeth)) |
2738 | pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n" ); |
2739 | return -ENOMEM; |
2740 | } |
2741 | |
2742 | ugeth->p_exf_glbl_param = |
		    (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
2744 | exf_glbl_param_offset); |
2745 | out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, |
2746 | ugeth->exf_glbl_param_offset); |
2747 | out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, |
2748 | (u32) ug_info->extendedFilteringChainPointer); |
2749 | |
2750 | } else { /* initialize 82xx style address filtering */ |
2751 | |
2752 | /* Init individual address recognition registers to disabled */ |
2753 | |
2754 | for (j = 0; j < NUM_OF_PADDRS; j++) |
			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
2756 | |
2757 | p_82xx_addr_filt = |
2758 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
2759 | p_rx_glbl_pram->addressfiltering; |
2760 | |
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
							    ENET_ADDR_TYPE_GROUP);
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
							    ENET_ADDR_TYPE_INDIVIDUAL);
2765 | } |
2766 | |
2767 | /* |
2768 | * Initialize UCC at QE level |
2769 | */ |
2770 | |
2771 | command = QE_INIT_TX_RX; |
2772 | |
2773 | /* Allocate shadow InitEnet command parameter structure. |
2774 | * This is needed because after the InitEnet command is executed, |
2775 | * the structure in DPRAM is released, because DPRAM is a premium |
2776 | * resource. |
2777 | * This shadow structure keeps a copy of what was done so that the |
2778 | * allocated resources can be released when the channel is freed. |
2779 | */ |
2780 | if (!(ugeth->p_init_enet_param_shadow = |
	      kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
2782 | if (netif_msg_ifup(ugeth)) |
2783 | pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n" ); |
2784 | return -ENOMEM; |
2785 | } |
2786 | |
2787 | /* Fill shadow InitEnet command parameter structure */ |
2788 | |
2789 | ugeth->p_init_enet_param_shadow->resinit1 = |
2790 | ENET_INIT_PARAM_MAGIC_RES_INIT1; |
2791 | ugeth->p_init_enet_param_shadow->resinit2 = |
2792 | ENET_INIT_PARAM_MAGIC_RES_INIT2; |
2793 | ugeth->p_init_enet_param_shadow->resinit3 = |
2794 | ENET_INIT_PARAM_MAGIC_RES_INIT3; |
2795 | ugeth->p_init_enet_param_shadow->resinit4 = |
2796 | ENET_INIT_PARAM_MAGIC_RES_INIT4; |
2797 | ugeth->p_init_enet_param_shadow->resinit5 = |
2798 | ENET_INIT_PARAM_MAGIC_RES_INIT5; |
2799 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= |
2800 | ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; |
2801 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= |
2802 | ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; |
2803 | |
2804 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= |
2805 | rx_glbl_pram_offset | ug_info->riscRx; |
2806 | if ((ug_info->largestexternallookupkeysize != |
2807 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && |
2808 | (ug_info->largestexternallookupkeysize != |
2809 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && |
2810 | (ug_info->largestexternallookupkeysize != |
2811 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { |
2812 | if (netif_msg_ifup(ugeth)) |
2813 | pr_err("Invalid largest External Lookup Key Size\n" ); |
2814 | return -EINVAL; |
2815 | } |
2816 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = |
2817 | ug_info->largestexternallookupkeysize; |
2818 | size = sizeof(struct ucc_geth_thread_rx_pram); |
2819 | if (ug_info->rxExtendedFiltering) { |
2820 | size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; |
2821 | if (ug_info->largestexternallookupkeysize == |
2822 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) |
2823 | size += |
2824 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; |
2825 | if (ug_info->largestexternallookupkeysize == |
2826 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES) |
2827 | size += |
2828 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; |
2829 | } |
2830 | |
	if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
				p_init_enet_param_shadow->rxthread[0]),
				(u8) (numThreadsRxNumerical + 1)
				/* Rx needs one extra for terminator */
				, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
				ug_info->riscRx, 1)) != 0) {
2837 | if (netif_msg_ifup(ugeth)) |
2838 | pr_err("Can not fill p_init_enet_param_shadow\n" ); |
2839 | return ret_val; |
2840 | } |
2841 | |
2842 | ugeth->p_init_enet_param_shadow->txglobal = |
2843 | tx_glbl_pram_offset | ug_info->riscTx; |
2844 | if ((ret_val = |
	     fill_init_enet_entries(ugeth,
				    &(ugeth->p_init_enet_param_shadow->
				      txthread[0]), numThreadsTxNumerical,
				    sizeof(struct ucc_geth_thread_tx_pram),
				    UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
				    ug_info->riscTx, 0)) != 0) {
2851 | if (netif_msg_ifup(ugeth)) |
2852 | pr_err("Can not fill p_init_enet_param_shadow\n" ); |
2853 | return ret_val; |
2854 | } |
2855 | |
2856 | /* Load Rx bds with buffers */ |
	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
2859 | if (netif_msg_ifup(ugeth)) |
2860 | pr_err("Can not fill Rx bds with buffers\n" ); |
2861 | return ret_val; |
2862 | } |
2863 | } |
2864 | |
2865 | /* Allocate InitEnet command parameter structure */ |
	init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
2867 | if (IS_ERR_VALUE(init_enet_pram_offset)) { |
2868 | if (netif_msg_ifup(ugeth)) |
2869 | pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n" ); |
2870 | return -ENOMEM; |
2871 | } |
2872 | p_init_enet_pram = |
	    (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
2874 | |
2875 | /* Copy shadow InitEnet command parameter structure into PRAM */ |
2876 | out_8(&p_init_enet_pram->resinit1, |
2877 | ugeth->p_init_enet_param_shadow->resinit1); |
2878 | out_8(&p_init_enet_pram->resinit2, |
2879 | ugeth->p_init_enet_param_shadow->resinit2); |
2880 | out_8(&p_init_enet_pram->resinit3, |
2881 | ugeth->p_init_enet_param_shadow->resinit3); |
2882 | out_8(&p_init_enet_pram->resinit4, |
2883 | ugeth->p_init_enet_param_shadow->resinit4); |
2884 | out_be16(&p_init_enet_pram->resinit5, |
2885 | ugeth->p_init_enet_param_shadow->resinit5); |
2886 | out_8(&p_init_enet_pram->largestexternallookupkeysize, |
2887 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); |
2888 | out_be32(&p_init_enet_pram->rgftgfrxglobal, |
2889 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal); |
2890 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) |
2891 | out_be32(&p_init_enet_pram->rxthread[i], |
2892 | ugeth->p_init_enet_param_shadow->rxthread[i]); |
2893 | out_be32(&p_init_enet_pram->txglobal, |
2894 | ugeth->p_init_enet_param_shadow->txglobal); |
2895 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) |
2896 | out_be32(&p_init_enet_pram->txthread[i], |
2897 | ugeth->p_init_enet_param_shadow->txthread[i]); |
2898 | |
2899 | /* Issue QE command */ |
2900 | cecr_subblock = |
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);
2904 | |
2905 | /* Free InitEnet command parameter */ |
	qe_muram_free(init_enet_pram_offset);
2907 | |
2908 | return 0; |
2909 | } |
2910 | |
2911 | /* This is called by the kernel when a frame is ready for transmission. */ |
2912 | /* It is pointed to by the dev->hard_start_xmit function pointer */ |
2913 | static netdev_tx_t |
2914 | ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2915 | { |
2916 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
2917 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
2918 | struct ucc_fast_private *uccf; |
2919 | #endif |
2920 | u8 __iomem *bd; /* BD pointer */ |
2921 | u32 bd_status; |
2922 | u8 txQ = 0; |
2923 | unsigned long flags; |
2924 | |
2925 | ugeth_vdbg("%s: IN" , __func__); |
2926 | |
	netdev_sent_queue(dev, skb->len);
2928 | spin_lock_irqsave(&ugeth->lock, flags); |
2929 | |
2930 | dev->stats.tx_bytes += skb->len; |
2931 | |
2932 | /* Start from the next BD that should be filled */ |
2933 | bd = ugeth->txBd[txQ]; |
2934 | bd_status = in_be32((u32 __iomem *)bd); |
2935 | /* Save the skb pointer so we can free it later */ |
2936 | ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; |
2937 | |
2938 | /* Update the current skb pointer (wrapping if this was the last) */ |
2939 | ugeth->skb_curtx[txQ] = |
2940 | (ugeth->skb_curtx[txQ] + |
2941 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); |
2942 | |
2943 | /* set up the buffer descriptor */ |
2944 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
2945 | dma_map_single(ugeth->dev, skb->data, |
2946 | skb->len, DMA_TO_DEVICE)); |
2947 | |
2948 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
2949 | |
2950 | bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; |
2951 | |
2952 | /* set bd status and length */ |
2953 | out_be32((u32 __iomem *)bd, bd_status); |
2954 | |
2955 | /* Move to next BD in the ring */ |
2956 | if (!(bd_status & T_W)) |
2957 | bd += sizeof(struct qe_bd); |
2958 | else |
2959 | bd = ugeth->p_tx_bd_ring[txQ]; |
2960 | |
2961 | /* If the next BD still needs to be cleaned up, then the bds |
2962 | are full. We need to tell the kernel to stop sending us stuff. */ |
2963 | if (bd == ugeth->confBd[txQ]) { |
2964 | if (!netif_queue_stopped(dev)) |
2965 | netif_stop_queue(dev); |
2966 | } |
2967 | |
2968 | ugeth->txBd[txQ] = bd; |
2969 | |
2970 | skb_tx_timestamp(skb); |
2971 | |
2972 | if (ugeth->p_scheduler) { |
2973 | ugeth->cpucount[txQ]++; |
2974 | /* Indicate to QE that there are more Tx bds ready for |
2975 | transmission */ |
2976 | /* This is done by writing a running counter of the bd |
2977 | count to the scheduler PRAM. */ |
2978 | out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); |
2979 | } |
2980 | |
2981 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
2982 | uccf = ugeth->uccf; |
2983 | out_be16(uccf->p_utodr, UCC_FAST_TOD); |
2984 | #endif |
2985 | spin_unlock_irqrestore(&ugeth->lock, flags); |
2986 | |
2987 | return NETDEV_TX_OK; |
2988 | } |
2989 | |
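| /* Rx path, called from the NAPI poll loop: walk the Rx BD ring for |
|  * queue rxQ, pass good frames up the stack and drop errored ones, |
|  * refill each BD with a fresh skb, and stop once rx_work_limit frames |
|  * have been handled or an empty BD is reached. |
|  */ |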
2990 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) |
2991 | { |
2992 | struct sk_buff *skb; |
2993 | u8 __iomem *bd; |
2994 | u16 length, howmany = 0; |
2995 | u32 bd_status; |
2996 | u8 *bdBuffer; |
2997 | struct net_device *dev; |
2998 | |
2999 | ugeth_vdbg("%s: IN" , __func__); |
3000 | |
3001 | dev = ugeth->ndev; |
3002 | |
3003 | /* collect received buffers */ |
3004 | bd = ugeth->rxBd[rxQ]; |
3005 | |
3006 | bd_status = in_be32((u32 __iomem *)bd); |
3007 | |
3008 | /* while there are received buffers and BD is full (~R_E) */ |
3009 | while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { |
3010 | bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); |
3011 | length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); |
3012 | skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; |
3013 | |
3014 | /* determine whether buffer is first, last, first and last |
3015 | (single buffer frame) or middle (not first and not last) */ |
3016 | if (!skb || |
3017 | (!(bd_status & (R_F | R_L))) || |
3018 | (bd_status & R_ERRORS_FATAL)) { |
3019 | if (netif_msg_rx_err(ugeth)) |
3020 | pr_err("%d: ERROR!!! skb - 0x%08x\n" , |
3021 | __LINE__, (u32)skb); |
3022 | dev_kfree_skb(skb); |
3023 | |
3024 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; |
3025 | dev->stats.rx_dropped++; |
3026 | } else { |
3027 | dev->stats.rx_packets++; |
3028 | howmany++; |
3029 | |
3030 | /* Prep the skb for the packet */ |
3031 | skb_put(skb, length); |
3032 | |
3033 | /* Tell the skb what kind of packet this is */ |
3034 | skb->protocol = eth_type_trans(skb, ugeth->ndev); |
3035 | |
3036 | dev->stats.rx_bytes += length; |
3037 | /* Send the packet up the stack */ |
3038 | netif_receive_skb(skb); |
3039 | } |
3040 | |
3041 | skb = get_new_skb(ugeth, bd); |
3042 | if (!skb) { |
3043 | if (netif_msg_rx_err(ugeth)) |
3044 | pr_warn("No Rx Data Buffer\n" ); |
3045 | dev->stats.rx_dropped++; |
3046 | break; |
3047 | } |
3048 | |
3049 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; |
3050 | |
3051 | /* update to point at the next skb */ |
3052 | ugeth->skb_currx[rxQ] = |
3053 | (ugeth->skb_currx[rxQ] + |
3054 | 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); |
3055 | |
3056 | if (bd_status & R_W) |
3057 | bd = ugeth->p_rx_bd_ring[rxQ]; |
3058 | else |
3059 | bd += sizeof(struct qe_bd); |
3060 | |
3061 | bd_status = in_be32((u32 __iomem *)bd); |
3062 | } |
3063 | |
3064 | ugeth->rxBd[rxQ] = bd; |
3065 | return howmany; |
3066 | } |
3067 | |
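| /* Tx completion: reclaim BDs the controller has released (T_R clear), |
|  * free the transmitted skbs, wake the queue if it was stopped and |
|  * report the completed work to BQL. |
|  */ |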
3068 | static int ucc_geth_tx(struct net_device *dev, u8 txQ) |
3069 | { |
3070 | /* Start from the next BD that should be filled */ |
3071 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3072 | unsigned int bytes_sent = 0; |
3073 | int howmany = 0; |
3074 | u8 __iomem *bd; /* BD pointer */ |
3075 | u32 bd_status; |
3076 | |
3077 | bd = ugeth->confBd[txQ]; |
3078 | bd_status = in_be32((u32 __iomem *)bd); |
3079 | |
3080 | /* Normal processing. */ |
3081 | while ((bd_status & T_R) == 0) { |
3082 | struct sk_buff *skb; |
3083 | |
3084 | /* BD contains already transmitted buffer. */ |
3085 | /* Handle the transmitted buffer and release */ |
3086 | /* the BD to be used with the current frame */ |
3087 | |
3088 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; |
3089 | if (!skb) |
3090 | break; |
3091 | howmany++; |
3092 | bytes_sent += skb->len; |
3093 | dev->stats.tx_packets++; |
3094 | |
3095 | dev_consume_skb_any(skb); |
3096 | |
3097 | ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; |
3098 | ugeth->skb_dirtytx[txQ] = |
3099 | (ugeth->skb_dirtytx[txQ] + |
3100 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); |
3101 | |
3102 | /* We freed a buffer, so now we can restart transmission */ |
3103 | if (netif_queue_stopped(dev)) |
3104 | netif_wake_queue(dev); |
3105 | |
3106 | /* Advance the confirmation BD pointer */ |
3107 | if (!(bd_status & T_W)) |
3108 | bd += sizeof(struct qe_bd); |
3109 | else |
3110 | bd = ugeth->p_tx_bd_ring[txQ]; |
3111 | bd_status = in_be32((u32 __iomem *)bd); |
3112 | } |
3113 | ugeth->confBd[txQ] = bd; |
3114 | netdev_completed_queue(dev, howmany, bytes_sent); |
3115 | return 0; |
3116 | } |
3117 | |
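| /* NAPI poll: reclaim completed Tx BDs under the lock, then receive up |
|  * to "budget" frames; if the budget was not exhausted, complete NAPI |
|  * and unmask the Rx/Tx events again. |
|  */ |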
3118 | static int ucc_geth_poll(struct napi_struct *napi, int budget) |
3119 | { |
3120 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); |
3121 | struct ucc_geth_info *ug_info; |
3122 | int howmany, i; |
3123 | |
3124 | ug_info = ugeth->ug_info; |
3125 | |
3126 | /* Tx event processing */ |
3127 | spin_lock(&ugeth->lock); |
3128 | for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) |
3129 | ucc_geth_tx(ugeth->ndev, i); |
3130 | spin_unlock(&ugeth->lock); |
3131 | |
3132 | howmany = 0; |
3133 | for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) |
3134 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); |
3135 | |
3136 | if (howmany < budget) { |
3137 | napi_complete_done(napi, howmany); |
3138 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); |
3139 | } |
3140 | |
3141 | return howmany; |
3142 | } |
3143 | |
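| /* Interrupt handler: read and acknowledge the UCC events, mask the |
|  * Rx/Tx events and hand them to NAPI, and count busy/Tx-error |
|  * conditions. |
|  */ |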
3144 | static irqreturn_t ucc_geth_irq_handler(int irq, void *info) |
3145 | { |
3146 | struct net_device *dev = info; |
3147 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3148 | struct ucc_fast_private *uccf; |
3149 | struct ucc_geth_info *ug_info; |
3150 | register u32 ucce; |
3151 | register u32 uccm; |
3152 | |
3153 | ugeth_vdbg("%s: IN" , __func__); |
3154 | |
3155 | uccf = ugeth->uccf; |
3156 | ug_info = ugeth->ug_info; |
3157 | |
3158 | /* read and clear events */ |
3159 | ucce = (u32) in_be32(uccf->p_ucce); |
3160 | uccm = (u32) in_be32(uccf->p_uccm); |
3161 | ucce &= uccm; |
3162 | out_be32(uccf->p_ucce, ucce); |
3163 | |
3164 | /* check for receive events that require processing */ |
3165 | if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { |
3166 | if (napi_schedule_prep(&ugeth->napi)) { |
3167 | uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); |
3168 | out_be32(uccf->p_uccm, uccm); |
3169 | __napi_schedule(&ugeth->napi); |
3170 | } |
3171 | } |
3172 | |
3173 | /* Errors and other events */ |
3174 | if (ucce & UCCE_OTHER) { |
3175 | if (ucce & UCC_GETH_UCCE_BSY) |
3176 | dev->stats.rx_errors++; |
3177 | if (ucce & UCC_GETH_UCCE_TXE) |
3178 | dev->stats.tx_errors++; |
3179 | } |
3180 | |
3181 | return IRQ_HANDLED; |
3182 | } |
3183 | |
3184 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3185 | /* |
3186 | * Polling 'interrupt' - used by things like netconsole to send skbs |
3187 | * without having to re-enable interrupts. It's not called while |
3188 | * the interrupt routine is executing. |
3189 | */ |
3190 | static void ucc_netpoll(struct net_device *dev) |
3191 | { |
3192 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3193 | int irq = ugeth->ug_info->uf_info.irq; |
3194 | |
3195 | disable_irq(irq); |
3196 | ucc_geth_irq_handler(irq, dev); |
3197 | enable_irq(irq); |
3198 | } |
3199 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
3200 | |
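| /* .ndo_set_mac_address: record the new address; if the interface is |
|  * running, also write it to MACSTNADDR1/2 right away. |
|  */ |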
3201 | static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) |
3202 | { |
3203 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3204 | struct sockaddr *addr = p; |
3205 | |
3206 | if (!is_valid_ether_addr(addr->sa_data)) |
3207 | return -EADDRNOTAVAIL; |
3208 | |
3209 | eth_hw_addr_set(dev, addr->sa_data); |
3210 | |
3211 | /* |
3212 | * If device is not running, we will set mac addr register |
3213 | * when opening the device. |
3214 | */ |
3215 | if (!netif_running(dev)) |
3216 | return 0; |
3217 | |
3218 | spin_lock_irq(&ugeth->lock); |
3219 | init_mac_station_addr_regs(dev->dev_addr[0], |
3220 | dev->dev_addr[1], |
3221 | dev->dev_addr[2], |
3222 | dev->dev_addr[3], |
3223 | dev->dev_addr[4], |
3224 | dev->dev_addr[5], |
3225 | &ugeth->ug_regs->macstnaddr1, |
3226 | &ugeth->ug_regs->macstnaddr2); |
3227 | spin_unlock_irq(&ugeth->lock); |
3228 | |
3229 | return 0; |
3230 | } |
3231 | |
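| /* Bring the controller up: build the driver structures, program the |
|  * UCC and MAC for the selected interface, load the station address |
|  * registers and finally enable Rx and Tx. |
|  */ |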
3232 | static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) |
3233 | { |
3234 | struct net_device *dev = ugeth->ndev; |
3235 | int err; |
3236 | |
3237 | err = ucc_struct_init(ugeth); |
3238 | if (err) { |
3239 | netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n" ); |
3240 | goto err; |
3241 | } |
3242 | |
3243 | err = ucc_geth_startup(ugeth); |
3244 | if (err) { |
3245 | netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n" ); |
3246 | goto err; |
3247 | } |
3248 | |
3249 | err = adjust_enet_interface(ugeth); |
3250 | if (err) { |
3251 | netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n" ); |
3252 | goto err; |
3253 | } |
3254 | |
3255 | /* Set MACSTNADDR1, MACSTNADDR2 */ |
3256 | /* For more details see the hardware spec. */ |
3257 | init_mac_station_addr_regs(dev->dev_addr[0], |
3258 | dev->dev_addr[1], |
3259 | dev->dev_addr[2], |
3260 | dev->dev_addr[3], |
3261 | dev->dev_addr[4], |
3262 | dev->dev_addr[5], |
3263 | &ugeth->ug_regs->macstnaddr1, |
3264 | &ugeth->ug_regs->macstnaddr2); |
3265 |  |
3266 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); |
3267 | if (err) { |
3268 | netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n" ); |
3269 | goto err; |
3270 | } |
3271 | |
3272 | return 0; |
3273 | err: |
3274 | ucc_geth_stop(ugeth); |
3275 | return err; |
3276 | } |
3277 | |
3278 | /* Called when something needs to use the ethernet device */ |
3279 | /* Returns 0 for success. */ |
3280 | static int ucc_geth_open(struct net_device *dev) |
3281 | { |
3282 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3283 | int err; |
3284 | |
3285 | ugeth_vdbg("%s: IN" , __func__); |
3286 | |
3287 | /* Test station address */ |
3288 | if (dev->dev_addr[0] & ENET_GROUP_ADDR) { |
3289 | netif_err(ugeth, ifup, dev, |
3290 | "Multicast address used for station address - is this what you wanted?\n" ); |
3291 | return -EINVAL; |
3292 | } |
3293 | |
3294 | err = init_phy(dev); |
3295 | if (err) { |
3296 | netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n" ); |
3297 | return err; |
3298 | } |
3299 | |
3300 | err = ucc_geth_init_mac(ugeth); |
3301 | if (err) { |
3302 | netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n" ); |
3303 | goto err; |
3304 | } |
3305 | |
3306 | err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, |
3307 | 0, "UCC Geth", dev); |
3308 | if (err) { |
3309 | netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n" ); |
3310 | goto err; |
3311 | } |
3312 | |
3313 | phy_start(ugeth->phydev); |
3314 | napi_enable(&ugeth->napi); |
3315 | netdev_reset_queue(dev); |
3316 | netif_start_queue(dev); |
3317 | |
3318 | device_set_wakeup_capable(&dev->dev, |
3319 | qe_alive_during_sleep() || ugeth->phydev->irq); |
3320 | device_set_wakeup_enable(&dev->dev, ugeth->wol_en); |
3321 | |
3322 | return err; |
3323 | |
3324 | err: |
3325 | ucc_geth_stop(ugeth); |
3326 | return err; |
3327 | } |
3328 | |
3329 | /* Stops the kernel queue, and halts the controller */ |
3330 | static int ucc_geth_close(struct net_device *dev) |
3331 | { |
3332 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3333 | |
3334 | ugeth_vdbg("%s: IN" , __func__); |
3335 | |
3336 | napi_disable(&ugeth->napi); |
3337 |  |
3338 | cancel_work_sync(&ugeth->timeout_work); |
3339 | ucc_geth_stop(ugeth); |
3340 | phy_disconnect(ugeth->phydev); |
3341 | ugeth->phydev = NULL; |
3342 | |
3343 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); |
3344 | |
3345 | netif_stop_queue(dev); |
3346 | netdev_reset_queue(dev); |
3347 | |
3348 | return 0; |
3349 | } |
3350 | |
3351 | /* Reopen device. This will reset the MAC and PHY. */ |
3352 | static void ucc_geth_timeout_work(struct work_struct *work) |
3353 | { |
3354 | struct ucc_geth_private *ugeth; |
3355 | struct net_device *dev; |
3356 | |
3357 | ugeth = container_of(work, struct ucc_geth_private, timeout_work); |
3358 | dev = ugeth->ndev; |
3359 | |
3360 | ugeth_vdbg("%s: IN" , __func__); |
3361 | |
3362 | dev->stats.tx_errors++; |
3363 | |
3364 | ugeth_dump_regs(ugeth); |
3365 | |
3366 | if (dev->flags & IFF_UP) { |
3367 | /* |
3368 | * Must reset MAC *and* PHY. This is done by reopening |
3369 | * the device. |
3370 | */ |
3371 | netif_tx_stop_all_queues(dev); |
3372 | ucc_geth_stop(ugeth); |
3373 | ucc_geth_init_mac(ugeth); |
3374 | /* Must start PHY here */ |
3375 | phy_start(ugeth->phydev); |
3376 | netif_tx_start_all_queues(dev); |
3377 | } |
3378 | |
3379 | netif_tx_schedule_all(dev); |
3380 | } |
3381 | |
3382 | /* |
3383 | * ucc_geth_timeout gets called when a packet has not been |
3384 | * transmitted after a set amount of time. |
3385 | */ |
3386 | static void ucc_geth_timeout(struct net_device *dev, unsigned int txqueue) |
3387 | { |
3388 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3389 | |
3390 | schedule_work(&ugeth->timeout_work); |
3391 | } |
3392 | |
3393 | |
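| /* Power management: suspend detaches and quiesces the controller and, |
|  * if magic-packet wake is requested, leaves the UCC armed for MPD |
|  * detection; resume either re-enables the UCC directly or rebuilds the |
|  * MAC when the QE loses state during sleep. |
|  */ |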
3394 | #ifdef CONFIG_PM |
3395 | |
3396 | static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) |
3397 | { |
3398 | struct net_device *ndev = platform_get_drvdata(ofdev); |
3399 | struct ucc_geth_private *ugeth = netdev_priv(ndev); |
3400 | |
3401 | if (!netif_running(ndev)) |
3402 | return 0; |
3403 | |
3404 | netif_device_detach(ndev); |
3405 | napi_disable(&ugeth->napi); |
3406 | |
3407 | /* |
3408 | * Disable the controller, otherwise we'll wakeup on any network |
3409 | * activity. |
3410 | */ |
3411 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); |
3412 | |
3413 | if (ugeth->wol_en & WAKE_MAGIC) { |
3414 | setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); |
3415 | setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); |
3416 | ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); |
3417 | } else if (!(ugeth->wol_en & WAKE_PHY)) { |
3418 | phy_stop(ugeth->phydev); |
3419 | } |
3420 | |
3421 | return 0; |
3422 | } |
3423 | |
3424 | static int ucc_geth_resume(struct platform_device *ofdev) |
3425 | { |
3426 | struct net_device *ndev = platform_get_drvdata(ofdev); |
3427 | struct ucc_geth_private *ugeth = netdev_priv(ndev); |
3428 | int err; |
3429 | |
3430 | if (!netif_running(ndev)) |
3431 | return 0; |
3432 | |
3433 | if (qe_alive_during_sleep()) { |
3434 | if (ugeth->wol_en & WAKE_MAGIC) { |
3435 | ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); |
3436 | clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); |
3437 | clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); |
3438 | } |
3439 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); |
3440 | } else { |
3441 | /* |
3442 | * Full reinitialization is required if QE shuts down |
3443 | * during sleep. |
3444 | */ |
3445 | ucc_geth_memclean(ugeth); |
3446 | |
3447 | err = ucc_geth_init_mac(ugeth); |
3448 | if (err) { |
3449 | netdev_err(ndev, "Cannot initialize MAC, aborting\n"); |
3450 | return err; |
3451 | } |
3452 | } |
3453 | |
3454 | ugeth->oldlink = 0; |
3455 | ugeth->oldspeed = 0; |
3456 | ugeth->oldduplex = -1; |
3457 | |
3458 | phy_stop(ugeth->phydev); |
3459 | phy_start(ugeth->phydev); |
3460 | |
3461 | napi_enable(&ugeth->napi); |
3462 | netif_device_attach(ndev); |
3463 | |
3464 | return 0; |
3465 | } |
3466 | |
3467 | #else |
3468 | #define ucc_geth_suspend NULL |
3469 | #define ucc_geth_resume NULL |
3470 | #endif |
3471 | |
3472 | static phy_interface_t to_phy_interface(const char *phy_connection_type) |
3473 | { |
3474 | if (strcasecmp(phy_connection_type, "mii") == 0) |
3475 | return PHY_INTERFACE_MODE_MII; |
3476 | if (strcasecmp(phy_connection_type, "gmii") == 0) |
3477 | return PHY_INTERFACE_MODE_GMII; |
3478 | if (strcasecmp(phy_connection_type, "tbi") == 0) |
3479 | return PHY_INTERFACE_MODE_TBI; |
3480 | if (strcasecmp(phy_connection_type, "rmii") == 0) |
3481 | return PHY_INTERFACE_MODE_RMII; |
3482 | if (strcasecmp(phy_connection_type, "rgmii") == 0) |
3483 | return PHY_INTERFACE_MODE_RGMII; |
3484 | if (strcasecmp(phy_connection_type, "rgmii-id") == 0) |
3485 | return PHY_INTERFACE_MODE_RGMII_ID; |
3486 | if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) |
3487 | return PHY_INTERFACE_MODE_RGMII_TXID; |
3488 | if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) |
3489 | return PHY_INTERFACE_MODE_RGMII_RXID; |
3490 | if (strcasecmp(phy_connection_type, "rtbi") == 0) |
3491 | return PHY_INTERFACE_MODE_RTBI; |
3492 | if (strcasecmp(phy_connection_type, "sgmii") == 0) |
3493 | return PHY_INTERFACE_MODE_SGMII; |
3494 | |
3495 | return PHY_INTERFACE_MODE_MII; |
3496 | } |
3497 | |
3498 | static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
3499 | { |
3500 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3501 | |
3502 | if (!netif_running(dev)) |
3503 | return -EINVAL; |
3504 | |
3505 | if (!ugeth->phydev) |
3506 | return -ENODEV; |
3507 | |
3508 | return phy_mii_ioctl(ugeth->phydev, rq, cmd); |
3509 | } |
3510 | |
3511 | static const struct net_device_ops ucc_geth_netdev_ops = { |
3512 | .ndo_open = ucc_geth_open, |
3513 | .ndo_stop = ucc_geth_close, |
3514 | .ndo_start_xmit = ucc_geth_start_xmit, |
3515 | .ndo_validate_addr = eth_validate_addr, |
3516 | .ndo_change_carrier = fixed_phy_change_carrier, |
3517 | .ndo_set_mac_address = ucc_geth_set_mac_addr, |
3518 | .ndo_set_rx_mode = ucc_geth_set_multi, |
3519 | .ndo_tx_timeout = ucc_geth_timeout, |
3520 | .ndo_eth_ioctl = ucc_geth_ioctl, |
3521 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3522 | .ndo_poll_controller = ucc_netpoll, |
3523 | #endif |
3524 | }; |
3525 | |
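| /* Read the Rx/Tx clock source for this UCC from the device tree, |
|  * preferring the textual "*-clock-name" property and falling back to |
|  * the legacy numeric "*-clock" property. |
|  */ |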
3526 | static int ucc_geth_parse_clock(struct device_node *np, const char *which, |
3527 | enum qe_clock *out) |
3528 | { |
3529 | const char *sprop; |
3530 | char buf[24]; |
3531 | |
3532 | snprintf(buf, sizeof(buf), "%s-clock-name", which); |
3533 | sprop = of_get_property(np, buf, NULL); |
3534 | if (sprop) { |
3535 | *out = qe_clock_source(sprop); |
3536 | } else { |
3537 | u32 val; |
3538 | |
3539 | snprintf(buf, sizeof(buf), "%s-clock", which); |
3540 | if (of_property_read_u32(np, buf, &val)) { |
3541 | /* If both *-clock-name and *-clock are missing, |
3542 | * we want to tell people to use *-clock-name. |
3543 | */ |
3544 | pr_err("missing %s-clock-name property\n" , buf); |
3545 | return -EINVAL; |
3546 | } |
3547 | *out = val; |
3548 | } |
3549 | if (*out < QE_CLK_NONE || *out > QE_CLK24) { |
3550 | pr_err("invalid %s property\n" , buf); |
3551 | return -EINVAL; |
3552 | } |
3553 | return 0; |
3554 | } |
3555 | |
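| /* Probe: look up the UCC number, clocks, register range, IRQ and |
|  * PHY/TBI handles from the device tree, size the FIFOs and thread |
|  * counts for the maximum link speed, then allocate and register the |
|  * net_device. |
|  */ |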
3556 | static int ucc_geth_probe(struct platform_device* ofdev) |
3557 | { |
3558 | struct device *device = &ofdev->dev; |
3559 | struct device_node *np = ofdev->dev.of_node; |
3560 | struct net_device *dev = NULL; |
3561 | struct ucc_geth_private *ugeth = NULL; |
3562 | struct ucc_geth_info *ug_info; |
3563 | struct resource res; |
3564 | int err, ucc_num, max_speed = 0; |
3565 | const unsigned int *prop; |
3566 | phy_interface_t phy_interface; |
3567 | static const int enet_to_speed[] = { |
3568 | SPEED_10, SPEED_10, SPEED_10, |
3569 | SPEED_100, SPEED_100, SPEED_100, |
3570 | SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, |
3571 | }; |
3572 | static const phy_interface_t enet_to_phy_interface[] = { |
3573 | PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, |
3574 | PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, |
3575 | PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, |
3576 | PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, |
3577 | PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, |
3578 | PHY_INTERFACE_MODE_SGMII, |
3579 | }; |
3580 | |
3581 | ugeth_vdbg("%s: IN" , __func__); |
3582 | |
3583 | prop = of_get_property(np, "cell-index", NULL); |
3584 | if (!prop) { |
3585 | prop = of_get_property(np, "device-id", NULL); |
3586 | if (!prop) |
3587 | return -ENODEV; |
3588 | } |
3589 | |
3590 | ucc_num = *prop - 1; |
3591 | if ((ucc_num < 0) || (ucc_num > 7)) |
3592 | return -ENODEV; |
3593 | |
3594 | ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL); |
3595 | if (ug_info == NULL) |
3596 | return -ENOMEM; |
3597 | |
3598 | ug_info->uf_info.ucc_num = ucc_num; |
3599 | |
3600 | err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock); |
3601 | if (err) |
3602 | goto err_free_info; |
3603 | err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock); |
3604 | if (err) |
3605 | goto err_free_info; |
3606 | |
3607 | err = of_address_to_resource(np, 0, &res); |
3608 | if (err) |
3609 | goto err_free_info; |
3610 | |
3611 | ug_info->uf_info.regs = res.start; |
3612 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); |
3613 |  |
3614 | ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); |
3615 | if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { |
3616 | /* |
3617 | * In the case of a fixed PHY, the DT node associated |
3618 | * to the PHY is the Ethernet MAC DT node. |
3619 | */ |
3620 | err = of_phy_register_fixed_link(np); |
3621 | if (err) |
3622 | goto err_free_info; |
3623 | ug_info->phy_node = of_node_get(np); |
3624 | } |
3625 | |
3626 | /* Find the TBI PHY node. If it's not there, we don't support SGMII */ |
3627 | ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); |
3628 | |
3629 | /* get the phy interface type, or default to MII */ |
3630 | prop = of_get_property(np, "phy-connection-type", NULL); |
3631 | if (!prop) { |
3632 | /* handle interface property present in old trees */ |
3633 | prop = of_get_property(ug_info->phy_node, "interface", NULL); |
3634 | if (prop != NULL) { |
3635 | phy_interface = enet_to_phy_interface[*prop]; |
3636 | max_speed = enet_to_speed[*prop]; |
3637 | } else |
3638 | phy_interface = PHY_INTERFACE_MODE_MII; |
3639 | } else { |
3640 | phy_interface = to_phy_interface((const char *)prop); |
3641 | } |
3642 | |
3643 | /* get speed, or derive from PHY interface */ |
3644 | if (max_speed == 0) |
3645 | switch (phy_interface) { |
3646 | case PHY_INTERFACE_MODE_GMII: |
3647 | case PHY_INTERFACE_MODE_RGMII: |
3648 | case PHY_INTERFACE_MODE_RGMII_ID: |
3649 | case PHY_INTERFACE_MODE_RGMII_RXID: |
3650 | case PHY_INTERFACE_MODE_RGMII_TXID: |
3651 | case PHY_INTERFACE_MODE_TBI: |
3652 | case PHY_INTERFACE_MODE_RTBI: |
3653 | case PHY_INTERFACE_MODE_SGMII: |
3654 | max_speed = SPEED_1000; |
3655 | break; |
3656 | default: |
3657 | max_speed = SPEED_100; |
3658 | break; |
3659 | } |
3660 | |
3661 | if (max_speed == SPEED_1000) { |
3662 | unsigned int snums = qe_get_num_of_snums(); |
3663 | |
3664 | /* configure muram FIFOs for gigabit operation */ |
3665 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; |
3666 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; |
3667 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; |
3668 | ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; |
3669 | ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; |
3670 | ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; |
3671 | ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; |
3672 | |
3673 | /* If QE's snum number is 46/76 which means we need to support |
3674 | * 4 UECs at 1000Base-T simultaneously, we need to allocate |
3675 | * more Threads to Rx. |
3676 | */ |
3677 | if ((snums == 76) || (snums == 46)) |
3678 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; |
3679 | else |
3680 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; |
3681 | } |
3682 | |
3683 | if (netif_msg_probe(&debug)) |
3684 | pr_info("UCC%1d at 0x%8llx (irq = %d)\n" , |
3685 | ug_info->uf_info.ucc_num + 1, |
3686 | (u64)ug_info->uf_info.regs, |
3687 | ug_info->uf_info.irq); |
3688 | |
3689 | /* Create an ethernet device instance */ |
3690 | dev = alloc_etherdev(sizeof(*ugeth)); |
3691 | |
3692 | if (dev == NULL) { |
3693 | err = -ENOMEM; |
3694 | goto err_deregister_fixed_link; |
3695 | } |
3696 | |
3697 | ugeth = netdev_priv(dev); |
3698 | spin_lock_init(&ugeth->lock); |
3699 | |
3700 | /* Create CQs for hash tables */ |
3701 | INIT_LIST_HEAD(&ugeth->group_hash_q); |
3702 | INIT_LIST_HEAD(&ugeth->ind_hash_q); |
3703 |  |
3704 | dev_set_drvdata(device, dev); |
3705 | |
3706 | /* Set the dev->base_addr to the gfar reg region */ |
3707 | dev->base_addr = (unsigned long)(ug_info->uf_info.regs); |
3708 | |
3709 | SET_NETDEV_DEV(dev, device); |
3710 | |
3711 | /* Fill in the dev structure */ |
3712 | uec_set_ethtool_ops(dev); |
3713 | dev->netdev_ops = &ucc_geth_netdev_ops; |
3714 | dev->watchdog_timeo = TX_TIMEOUT; |
3715 | INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); |
3716 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll); |
3717 | dev->mtu = 1500; |
3718 | dev->max_mtu = 1518; |
3719 | |
3720 | ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); |
3721 | ugeth->phy_interface = phy_interface; |
3722 | ugeth->max_speed = max_speed; |
3723 | |
3724 | /* Carrier starts down, phylib will bring it up */ |
3725 | netif_carrier_off(dev); |
3726 | |
3727 | err = register_netdev(dev); |
3728 | if (err) { |
3729 | if (netif_msg_probe(ugeth)) |
3730 | pr_err("%s: Cannot register net device, aborting\n" , |
3731 | dev->name); |
3732 | goto err_free_netdev; |
3733 | } |
3734 | |
3735 | of_get_ethdev_address(np, dev); |
3736 | |
3737 | ugeth->ug_info = ug_info; |
3738 | ugeth->dev = device; |
3739 | ugeth->ndev = dev; |
3740 | ugeth->node = np; |
3741 | |
3742 | return 0; |
3743 | |
3744 | err_free_netdev: |
3745 | free_netdev(dev); |
3746 | err_deregister_fixed_link: |
3747 | if (of_phy_is_fixed_link(np)) |
3748 | of_phy_deregister_fixed_link(np); |
3749 | of_node_put(ug_info->tbi_node); |
3750 | of_node_put(ug_info->phy_node); |
3751 | err_free_info: |
3752 | kfree(ug_info); |
3753 | |
3754 | return err; |
3755 | } |
3756 | |
3757 | static void ucc_geth_remove(struct platform_device* ofdev) |
3758 | { |
3759 | struct net_device *dev = platform_get_drvdata(ofdev); |
3760 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3761 | struct device_node *np = ofdev->dev.of_node; |
3762 | |
3763 | unregister_netdev(dev); |
3764 | ucc_geth_memclean(ugeth); |
3765 | if (of_phy_is_fixed_link(np)) |
3766 | of_phy_deregister_fixed_link(np); |
3767 | of_node_put(ugeth->ug_info->tbi_node); |
3768 | of_node_put(ugeth->ug_info->phy_node); |
3769 | kfree(ugeth->ug_info); |
3770 | free_netdev(dev); |
3771 | } |
3772 | |
3773 | static const struct of_device_id ucc_geth_match[] = { |
3774 | { |
3775 | .type = "network" , |
3776 | .compatible = "ucc_geth" , |
3777 | }, |
3778 | {}, |
3779 | }; |
3780 | |
3781 | MODULE_DEVICE_TABLE(of, ucc_geth_match); |
3782 | |
3783 | static struct platform_driver ucc_geth_driver = { |
3784 | .driver = { |
3785 | .name = DRV_NAME, |
3786 | .of_match_table = ucc_geth_match, |
3787 | }, |
3788 | .probe = ucc_geth_probe, |
3789 | .remove_new = ucc_geth_remove, |
3790 | .suspend = ucc_geth_suspend, |
3791 | .resume = ucc_geth_resume, |
3792 | }; |
3793 | |
3794 | static int __init ucc_geth_init(void) |
3795 | { |
3796 | if (netif_msg_drv(&debug)) |
3797 | pr_info(DRV_DESC "\n" ); |
3798 | |
3799 | return platform_driver_register(&ucc_geth_driver); |
3800 | } |
3801 | |
3802 | static void __exit ucc_geth_exit(void) |
3803 | { |
3804 | platform_driver_unregister(&ucc_geth_driver); |
3805 | } |
3806 | |
3807 | module_init(ucc_geth_init); |
3808 | module_exit(ucc_geth_exit); |
3809 | |
3810 | MODULE_AUTHOR("Freescale Semiconductor, Inc" ); |
3811 | MODULE_DESCRIPTION(DRV_DESC); |
3812 | MODULE_LICENSE("GPL" ); |
3813 | |