/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include "bgmac.h"

static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

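/* Stop the TX engine in two steps: request SUSPEND first and wait for the
 * ring to drain, then clear the control register and wait for the DISABLED
 * state to be reported.
 */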
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

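/* Fill one hardware descriptor: the 64-bit buffer address split into
 * low/high words plus two control words. The EOT flag on the ring's last
 * slot tells the hardware to wrap back to the first descriptor.
 */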
static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	netdev_sent_queue(net_dev, skb->len);
	ring->end += nr_frags + 1;

	wmb();

	/* ring->end now points past the last used slot. Tell hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet. The status
	 * register reports the byte offset of the current descriptor;
	 * subtract index_base (non-zero for unaligned rings), mask to
	 * handle wraparound and divide by the descriptor size to get a
	 * slot index.
	 */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc skb */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

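/* Publish new RX descriptors to the hardware. The barrier orders the
 * descriptor writes before the index update that hands them over.
 */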
static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

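/* Point a descriptor at its slot's buffer. On reception the hardware
 * fills in a bgmac_rx_header (length and flags) at the start of the
 * mapped area.
 */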
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

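/* Process received frames up to the descriptor the hardware is currently
 * on, within the NAPI weight, re-arming each slot with a fresh buffer
 * before passing the old one up the stack.
 */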
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does the ring support unaligned addressing? Probe by writing low address
 * bits to the ring-address register: if they stick, the ring base does not
 * have to be aligned and index_base must compensate.
 */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
			dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
			return -ENOTSUPP;
		}
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
						    &ring->dma_base,
						    GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
						    &ring->dma_base,
						    GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

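/* Program the ring base addresses and pre-fill every RX slot with a
 * buffer. Note the DMA-enable write is sequenced before or after the
 * base-address writes depending on whether the ring is unaligned.
 */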
static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}


/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				   bool force)
{
	u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = CMD_SW_RESET;
	else
		cmdcfg_sr = CMD_SW_RESET_OLD;

	bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_umac_write(bgmac, UMAC_CMD, new_val);

	bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
	udelay(2);
}

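/* The UniMAC keeps the station address in two registers: the first four
 * octets packed big-endian into UMAC_MAC0 and the last two into UMAC_MAC1.
 */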
static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
	else
		bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}

#if 0 /* We don't use these registers yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
		break;
	case SPEED_100:
		set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
		break;
	case SPEED_1000:
		set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
		break;
	case SPEED_2500:
		set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= CMD_HD_EN;

	bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
			bgmac_idm_write(bgmac, BCMA_IOCTL,
					bgmac_idm_read(bgmac, BCMA_IOCTL) |
					0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
		}
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

878 | |
879 | static void bgmac_chip_reset_idm_config(struct bgmac *bgmac) |
880 | { |
881 | u32 iost; |
882 | |
883 | iost = bgmac_idm_read(bgmac, BCMA_IOST); |
884 | if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) |
885 | iost &= ~BGMAC_BCMA_IOST_ATTACHED; |
886 | |
887 | /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ |
888 | if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { |
889 | u32 flags = 0; |
890 | |
891 | if (iost & BGMAC_BCMA_IOST_ATTACHED) { |
892 | flags = BGMAC_BCMA_IOCTL_SW_CLKEN; |
893 | if (bgmac->in_init || !bgmac->has_robosw) |
894 | flags |= BGMAC_BCMA_IOCTL_SW_RESET; |
895 | } |
896 | bgmac_clk_enable(bgmac, flags); |
897 | } |
898 | |
899 | if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw)) |
900 | bgmac_idm_write(bgmac, BCMA_IOCTL, |
901 | value: bgmac_idm_read(bgmac, BCMA_IOCTL) & |
902 | ~BGMAC_BCMA_IOCTL_SW_RESET); |
903 | } |

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
		bgmac_chip_reset_idm_config(bgmac);

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using UMAC_CMD_SR, but in this routine
	 * UMAC_CMD is read _after_ putting the chip in reset, so the reset
	 * bit has to be kept set until the MAC is taken out of reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = CMD_SW_RESET;
	else
		cmdcfg_sr = CMD_SW_RESET_OLD;

	bgmac_umac_cmd_maskset(bgmac,
			       ~(CMD_TX_EN |
				 CMD_RX_EN |
				 CMD_RX_PAUSE_IGNORE |
				 CMD_TX_ADDR_INS |
				 CMD_HD_EN |
				 CMD_LCL_LOOP_EN |
				 CMD_CNTL_FRM_EN |
				 CMD_RMT_LOOP_EN |
				 CMD_RX_ERR_DISC |
				 CMD_PRBL_EN |
				 CMD_TX_PAUSE_IGNORE |
				 CMD_PAD_EN |
				 CMD_PAUSE_FWD),
			       CMD_PROMISC |
			       CMD_NO_LEN_CHK |
			       CMD_CNTL_FRM_EN |
			       cmdcfg_sr,
			       false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = CMD_SW_RESET;
	else
		cmdcfg_sr = CMD_SW_RESET_OLD;

	cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
	bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
			       cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= CMD_TX_EN | CMD_RX_EN;
	bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
	else
		bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);

	bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

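/* NAPI poll: ack interrupts, reclaim TX and process RX. Returning the full
 * weight keeps polling scheduled; completing with less re-enables the chip
 * interrupts.
 */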
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs say about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  net_dev->name, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(net_dev, sa->sa_data);
	bgmac_write_mac_address(bgmac, net_dev->dev_addr);

	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
	return 0;
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_change_mtu		= bgmac_change_mtu,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strscpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

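/* MIB counters are 32-bit registers; for the 64-bit statistics the high
 * word lives at offset + 4 and is read and shifted in first.
 */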
static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);

int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
	if (IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return PTR_ERR(phy_dev);
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);

struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;

	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);

int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	bgmac->in_init = true;

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);
	dev_set_drvdata(bgmac->dev, bgmac);

	if (!is_valid_ether_addr(net_dev->dev_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			net_dev->dev_addr);
		eth_hw_addr_random(net_dev);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

	/* This (reset &) enable is not present in specs or the reference
	 * driver, but Broadcom does it in arch PCI code when enabling the
	 * fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	bgmac_chip_intrs_off(bgmac);

	/* This seems to be fixing IRQ by assigning OOB #6 to the core */
	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
			bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	/* Omit FCS from max MTU size */
	net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;

	bgmac->in_init = false;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);

int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");