1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Driver for Marvell PPv2 network controller for Armada 375 SoC. |
4 | * |
5 | * Copyright (C) 2014 Marvell |
6 | * |
7 | * Marcin Wojtas <mw@semihalf.com> |
8 | */ |
9 | |
10 | #include <linux/acpi.h> |
11 | #include <linux/kernel.h> |
12 | #include <linux/netdevice.h> |
13 | #include <linux/etherdevice.h> |
14 | #include <linux/platform_device.h> |
15 | #include <linux/skbuff.h> |
16 | #include <linux/inetdevice.h> |
17 | #include <linux/mbus.h> |
18 | #include <linux/module.h> |
19 | #include <linux/mfd/syscon.h> |
20 | #include <linux/interrupt.h> |
21 | #include <linux/cpumask.h> |
22 | #include <linux/of.h> |
23 | #include <linux/of_irq.h> |
24 | #include <linux/of_mdio.h> |
25 | #include <linux/of_net.h> |
26 | #include <linux/of_address.h> |
27 | #include <linux/phy.h> |
28 | #include <linux/phylink.h> |
29 | #include <linux/phy/phy.h> |
30 | #include <linux/ptp_classify.h> |
31 | #include <linux/clk.h> |
32 | #include <linux/hrtimer.h> |
33 | #include <linux/ktime.h> |
34 | #include <linux/regmap.h> |
35 | #include <uapi/linux/ppp_defs.h> |
36 | #include <net/ip.h> |
37 | #include <net/ipv6.h> |
38 | #include <net/page_pool/helpers.h> |
39 | #include <net/tso.h> |
40 | #include <linux/bpf_trace.h> |
41 | |
42 | #include "mvpp2.h" |
43 | #include "mvpp2_prs.h" |
44 | #include "mvpp2_cls.h" |
45 | |
46 | enum mvpp2_bm_pool_log_num { |
47 | MVPP2_BM_SHORT, |
48 | MVPP2_BM_LONG, |
49 | MVPP2_BM_JUMBO, |
50 | MVPP2_BM_POOLS_NUM |
51 | }; |
52 | |
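/* Default packet size and buffer count for each logical pool; filled in
 * by mvpp2_setup_bm_pool().
 */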
53 | static struct { |
54 | int pkt_size; |
55 | int buf_num; |
56 | } mvpp2_pools[MVPP2_BM_POOLS_NUM]; |
57 | |
58 | /* The prototype is added here to be used in start_dev when using ACPI. This |
59 | * will be removed once phylink is used for all modes (dt+ACPI). |
60 | */ |
61 | static void mvpp2_acpi_start(struct mvpp2_port *port); |
62 | |
63 | /* Queue modes */ |
64 | #define MVPP2_QDIST_SINGLE_MODE 0 |
65 | #define MVPP2_QDIST_MULTI_MODE 1 |
66 | |
67 | static int queue_mode = MVPP2_QDIST_MULTI_MODE; |
68 | |
69 | module_param(queue_mode, int, 0444); |
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
71 | |
72 | /* Utility/helper methods */ |
73 | |
74 | void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) |
75 | { |
	writel(data, priv->swth_base[0] + offset);
77 | } |
78 | |
79 | u32 mvpp2_read(struct mvpp2 *priv, u32 offset) |
80 | { |
	return readl(priv->swth_base[0] + offset);
82 | } |
83 | |
84 | static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) |
85 | { |
86 | return readl_relaxed(priv->swth_base[0] + offset); |
87 | } |
88 | |
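/* Map a CPU number onto one of the per-thread register windows; CPUs
 * beyond the number of threads wrap around. Illustrative example: with
 * nthreads == 4, CPU 5 would map to thread 1.
 */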
89 | static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) |
90 | { |
91 | return cpu % priv->nthreads; |
92 | } |
93 | |
94 | static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data) |
95 | { |
	writel(data, priv->cm3_base + offset);
97 | } |
98 | |
99 | static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset) |
100 | { |
	return readl(priv->cm3_base + offset);
102 | } |
103 | |
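/* Build a page_pool that DMA-maps (and syncs for device) its own pages,
 * so buffers taken from it are ready for RX DMA; callers only choose the
 * pool size, buffer length and DMA direction. Used below by
 * mvpp2_bm_init() when per-cpu pools are enabled.
 */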
104 | static struct page_pool * |
105 | mvpp2_create_page_pool(struct device *dev, int num, int len, |
106 | enum dma_data_direction dma_dir) |
107 | { |
108 | struct page_pool_params pp_params = { |
109 | /* internal DMA mapping in page_pool */ |
110 | .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, |
111 | .pool_size = num, |
112 | .nid = NUMA_NO_NODE, |
113 | .dev = dev, |
114 | .dma_dir = dma_dir, |
115 | .offset = MVPP2_SKB_HEADROOM, |
116 | .max_len = len, |
117 | }; |
118 | |
	return page_pool_create(&pp_params);
120 | } |
121 | |
122 | /* These accessors should be used to access: |
123 | * |
124 | * - per-thread registers, where each thread has its own copy of the |
125 | * register. |
126 | * |
127 | * MVPP2_BM_VIRT_ALLOC_REG |
128 | * MVPP2_BM_ADDR_HIGH_ALLOC |
129 | * MVPP22_BM_ADDR_HIGH_RLS_REG |
130 | * MVPP2_BM_VIRT_RLS_REG |
131 | * MVPP2_ISR_RX_TX_CAUSE_REG |
132 | * MVPP2_ISR_RX_TX_MASK_REG |
133 | * MVPP2_TXQ_NUM_REG |
134 | * MVPP2_AGGR_TXQ_UPDATE_REG |
135 | * MVPP2_TXQ_RSVD_REQ_REG |
136 | * MVPP2_TXQ_RSVD_RSLT_REG |
137 | * MVPP2_TXQ_SENT_REG |
138 | * MVPP2_RXQ_NUM_REG |
139 | * |
140 | * - global registers that must be accessed through a specific thread |
141 | * window, because they are related to an access to a per-thread |
142 | * register |
143 | * |
144 | * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) |
145 | * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) |
146 | * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) |
147 | * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) |
148 | * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) |
149 | * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) |
150 | * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) |
151 | * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) |
152 | * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) |
153 | * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) |
 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
157 | */ |
158 | static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, |
159 | u32 offset, u32 data) |
160 | { |
	writel(data, priv->swth_base[thread] + offset);
162 | } |
163 | |
164 | static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, |
165 | u32 offset) |
166 | { |
	return readl(priv->swth_base[thread] + offset);
168 | } |
169 | |
170 | static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, |
171 | u32 offset, u32 data) |
172 | { |
173 | writel_relaxed(data, priv->swth_base[thread] + offset); |
174 | } |
175 | |
176 | static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, |
177 | u32 offset) |
178 | { |
179 | return readl_relaxed(priv->swth_base[thread] + offset); |
180 | } |
181 | |
182 | static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, |
183 | struct mvpp2_tx_desc *tx_desc) |
184 | { |
185 | if (port->priv->hw_version == MVPP21) |
186 | return le32_to_cpu(tx_desc->pp21.buf_dma_addr); |
187 | else |
188 | return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & |
189 | MVPP2_DESC_DMA_MASK; |
190 | } |
191 | |
192 | static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, |
193 | struct mvpp2_tx_desc *tx_desc, |
194 | dma_addr_t dma_addr) |
195 | { |
196 | dma_addr_t addr, offset; |
197 | |
198 | addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; |
199 | offset = dma_addr & MVPP2_TX_DESC_ALIGN; |
200 | |
201 | if (port->priv->hw_version == MVPP21) { |
202 | tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); |
203 | tx_desc->pp21.packet_offset = offset; |
204 | } else { |
205 | __le64 val = cpu_to_le64(addr); |
206 | |
207 | tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); |
208 | tx_desc->pp22.buf_dma_addr_ptp |= val; |
209 | tx_desc->pp22.packet_offset = offset; |
210 | } |
211 | } |
212 | |
213 | static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, |
214 | struct mvpp2_tx_desc *tx_desc) |
215 | { |
216 | if (port->priv->hw_version == MVPP21) |
217 | return le16_to_cpu(tx_desc->pp21.data_size); |
218 | else |
219 | return le16_to_cpu(tx_desc->pp22.data_size); |
220 | } |
221 | |
222 | static void mvpp2_txdesc_size_set(struct mvpp2_port *port, |
223 | struct mvpp2_tx_desc *tx_desc, |
224 | size_t size) |
225 | { |
226 | if (port->priv->hw_version == MVPP21) |
227 | tx_desc->pp21.data_size = cpu_to_le16(size); |
228 | else |
229 | tx_desc->pp22.data_size = cpu_to_le16(size); |
230 | } |
231 | |
232 | static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, |
233 | struct mvpp2_tx_desc *tx_desc, |
234 | unsigned int txq) |
235 | { |
236 | if (port->priv->hw_version == MVPP21) |
237 | tx_desc->pp21.phys_txq = txq; |
238 | else |
239 | tx_desc->pp22.phys_txq = txq; |
240 | } |
241 | |
242 | static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, |
243 | struct mvpp2_tx_desc *tx_desc, |
244 | unsigned int command) |
245 | { |
246 | if (port->priv->hw_version == MVPP21) |
247 | tx_desc->pp21.command = cpu_to_le32(command); |
248 | else |
249 | tx_desc->pp22.command = cpu_to_le32(command); |
250 | } |
251 | |
252 | static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, |
253 | struct mvpp2_tx_desc *tx_desc) |
254 | { |
255 | if (port->priv->hw_version == MVPP21) |
256 | return tx_desc->pp21.packet_offset; |
257 | else |
258 | return tx_desc->pp22.packet_offset; |
259 | } |
260 | |
261 | static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, |
262 | struct mvpp2_rx_desc *rx_desc) |
263 | { |
264 | if (port->priv->hw_version == MVPP21) |
265 | return le32_to_cpu(rx_desc->pp21.buf_dma_addr); |
266 | else |
267 | return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & |
268 | MVPP2_DESC_DMA_MASK; |
269 | } |
270 | |
271 | static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, |
272 | struct mvpp2_rx_desc *rx_desc) |
273 | { |
274 | if (port->priv->hw_version == MVPP21) |
275 | return le32_to_cpu(rx_desc->pp21.buf_cookie); |
276 | else |
277 | return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & |
278 | MVPP2_DESC_DMA_MASK; |
279 | } |
280 | |
281 | static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, |
282 | struct mvpp2_rx_desc *rx_desc) |
283 | { |
284 | if (port->priv->hw_version == MVPP21) |
285 | return le16_to_cpu(rx_desc->pp21.data_size); |
286 | else |
287 | return le16_to_cpu(rx_desc->pp22.data_size); |
288 | } |
289 | |
290 | static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, |
291 | struct mvpp2_rx_desc *rx_desc) |
292 | { |
293 | if (port->priv->hw_version == MVPP21) |
294 | return le32_to_cpu(rx_desc->pp21.status); |
295 | else |
296 | return le32_to_cpu(rx_desc->pp22.status); |
297 | } |
298 | |
299 | static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) |
300 | { |
301 | txq_pcpu->txq_get_index++; |
302 | if (txq_pcpu->txq_get_index == txq_pcpu->size) |
303 | txq_pcpu->txq_get_index = 0; |
304 | } |
305 | |
306 | static void mvpp2_txq_inc_put(struct mvpp2_port *port, |
307 | struct mvpp2_txq_pcpu *txq_pcpu, |
308 | void *data, |
309 | struct mvpp2_tx_desc *tx_desc, |
310 | enum mvpp2_tx_buf_type buf_type) |
311 | { |
312 | struct mvpp2_txq_pcpu_buf *tx_buf = |
313 | txq_pcpu->buffs + txq_pcpu->txq_put_index; |
314 | tx_buf->type = buf_type; |
315 | if (buf_type == MVPP2_TYPE_SKB) |
316 | tx_buf->skb = data; |
317 | else |
318 | tx_buf->xdpf = data; |
319 | tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); |
320 | tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + |
321 | mvpp2_txdesc_offset_get(port, tx_desc); |
322 | txq_pcpu->txq_put_index++; |
323 | if (txq_pcpu->txq_put_index == txq_pcpu->size) |
324 | txq_pcpu->txq_put_index = 0; |
325 | } |
326 | |
/* Get the maximum number of RXQs */
328 | static int mvpp2_get_nrxqs(struct mvpp2 *priv) |
329 | { |
330 | unsigned int nrxqs; |
331 | |
332 | if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) |
333 | return 1; |
334 | |
335 | /* According to the PPv2.2 datasheet and our experiments on |
336 | * PPv2.1, RX queues have an allocation granularity of 4 (when |
337 | * more than a single one on PPv2.2). |
338 | * Round up to nearest multiple of 4. |
339 | */ |
340 | nrxqs = (num_possible_cpus() + 3) & ~0x3; |
341 | if (nrxqs > MVPP2_PORT_MAX_RXQ) |
342 | nrxqs = MVPP2_PORT_MAX_RXQ; |
343 | |
344 | return nrxqs; |
345 | } |
346 | |
/* Get the physical egress port number */
348 | static inline int mvpp2_egress_port(struct mvpp2_port *port) |
349 | { |
350 | return MVPP2_MAX_TCONT + port->id; |
351 | } |
352 | |
/* Get the physical TXQ number */
354 | static inline int mvpp2_txq_phys(int port, int txq) |
355 | { |
356 | return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; |
357 | } |
358 | |
359 | /* Returns a struct page if page_pool is set, otherwise a buffer */ |
360 | static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool, |
361 | struct page_pool *page_pool) |
362 | { |
363 | if (page_pool) |
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
370 | } |
371 | |
372 | static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, |
373 | struct page_pool *page_pool, void *data) |
374 | { |
375 | if (page_pool) |
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
381 | } |
382 | |
383 | /* Buffer Manager configuration routines */ |
384 | |
385 | /* Create pool */ |
386 | static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv, |
387 | struct mvpp2_bm_pool *bm_pool, int size) |
388 | { |
389 | u32 val; |
390 | |
391 | /* Number of buffer pointers must be a multiple of 16, as per |
392 | * hardware constraints |
393 | */ |
394 | if (!IS_ALIGNED(size, 16)) |
395 | return -EINVAL; |
396 | |
	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
	 * bytes per buffer pointer
	 */
400 | if (priv->hw_version == MVPP21) |
401 | bm_pool->size_bytes = 2 * sizeof(u32) * size; |
402 | else |
403 | bm_pool->size_bytes = 2 * sizeof(u64) * size; |
404 | |
	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
408 | if (!bm_pool->virt_addr) |
409 | return -ENOMEM; |
410 | |
411 | if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, |
412 | MVPP2_BM_POOL_PTR_ALIGN)) { |
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
417 | return -ENOMEM; |
418 | } |
419 | |
420 | mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), |
421 | lower_32_bits(bm_pool->dma_addr)); |
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
423 | |
424 | val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); |
425 | val |= MVPP2_BM_START_MASK; |
426 | |
427 | val &= ~MVPP2_BM_LOW_THRESH_MASK; |
428 | val &= ~MVPP2_BM_HIGH_THRESH_MASK; |
429 | |
430 | /* Set 8 Pools BPPI threshold for MVPP23 */ |
431 | if (priv->hw_version == MVPP23) { |
432 | val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH); |
433 | val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH); |
434 | } else { |
435 | val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH); |
436 | val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH); |
437 | } |
438 | |
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
440 | |
441 | bm_pool->size = size; |
442 | bm_pool->pkt_size = 0; |
443 | bm_pool->buf_num = 0; |
444 | |
445 | return 0; |
446 | } |
447 | |
448 | /* Set pool buffer size */ |
449 | static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, |
450 | struct mvpp2_bm_pool *bm_pool, |
451 | int buf_size) |
452 | { |
453 | u32 val; |
454 | |
455 | bm_pool->buf_size = buf_size; |
456 | |
457 | val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); |
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
459 | } |
460 | |
461 | static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, |
462 | struct mvpp2_bm_pool *bm_pool, |
463 | dma_addr_t *dma_addr, |
464 | phys_addr_t *phys_addr) |
465 | { |
466 | unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); |
467 | |
468 | *dma_addr = mvpp2_thread_read(priv, thread, |
469 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); |
470 | *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG); |
471 | |
472 | if (priv->hw_version >= MVPP22) { |
473 | u32 val; |
474 | u32 dma_addr_highbits, phys_addr_highbits; |
475 | |
476 | val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); |
477 | dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); |
478 | phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> |
479 | MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; |
480 | |
481 | if (sizeof(dma_addr_t) == 8) |
482 | *dma_addr |= (u64)dma_addr_highbits << 32; |
483 | |
484 | if (sizeof(phys_addr_t) == 8) |
485 | *phys_addr |= (u64)phys_addr_highbits << 32; |
486 | } |
487 | |
488 | put_cpu(); |
489 | } |
490 | |
491 | /* Free all buffers from the pool */ |
492 | static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, |
493 | struct mvpp2_bm_pool *bm_pool, int buf_num) |
494 | { |
495 | struct page_pool *pp = NULL; |
496 | int i; |
497 | |
498 | if (buf_num > bm_pool->buf_num) { |
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
500 | bm_pool->id, buf_num); |
501 | buf_num = bm_pool->buf_num; |
502 | } |
503 | |
504 | if (priv->percpu_pools) |
505 | pp = priv->page_pool[bm_pool->id]; |
506 | |
507 | for (i = 0; i < buf_num; i++) { |
508 | dma_addr_t buf_dma_addr; |
509 | phys_addr_t buf_phys_addr; |
510 | void *data; |
511 | |
512 | mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, |
					&buf_dma_addr, &buf_phys_addr);
514 | |
515 | if (!pp) |
516 | dma_unmap_single(dev, buf_dma_addr, |
517 | bm_pool->buf_size, DMA_FROM_DEVICE); |
518 | |
		data = (void *)phys_to_virt(buf_phys_addr);
520 | if (!data) |
521 | break; |
522 | |
		mvpp2_frag_free(bm_pool, pp, data);
524 | } |
525 | |
526 | /* Update BM driver with number of buffers removed from pool */ |
527 | bm_pool->buf_num -= i; |
528 | } |
529 | |
530 | /* Check number of buffers in BM pool */ |
531 | static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) |
532 | { |
533 | int buf_num = 0; |
534 | |
535 | buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & |
536 | MVPP22_BM_POOL_PTRS_NUM_MASK; |
537 | buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & |
538 | MVPP2_BM_BPPI_PTR_NUM_MASK; |
539 | |
540 | /* HW has one buffer ready which is not reflected in the counters */ |
541 | if (buf_num) |
542 | buf_num += 1; |
543 | |
544 | return buf_num; |
545 | } |
546 | |
547 | /* Cleanup pool */ |
548 | static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, |
549 | struct mvpp2_bm_pool *bm_pool) |
550 | { |
551 | int buf_num; |
552 | u32 val; |
553 | |
554 | buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); |
555 | mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); |
556 | |
557 | /* Check buffer counters after free */ |
558 | buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); |
559 | if (buf_num) { |
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
561 | bm_pool->id, bm_pool->buf_num); |
562 | return 0; |
563 | } |
564 | |
565 | val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); |
566 | val |= MVPP2_BM_STOP_MASK; |
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
568 | |
569 | if (priv->percpu_pools) { |
		page_pool_destroy(priv->page_pool[bm_pool->id]);
571 | priv->page_pool[bm_pool->id] = NULL; |
572 | } |
573 | |
	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
577 | return 0; |
578 | } |
579 | |
580 | static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv) |
581 | { |
582 | int i, err, size, poolnum = MVPP2_BM_POOLS_NUM; |
583 | struct mvpp2_bm_pool *bm_pool; |
584 | |
585 | if (priv->percpu_pools) |
586 | poolnum = mvpp2_get_nrxqs(priv) * 2; |
587 | |
588 | /* Create all pools with maximum size */ |
589 | size = MVPP2_BM_POOL_SIZE_MAX; |
590 | for (i = 0; i < poolnum; i++) { |
591 | bm_pool = &priv->bm_pools[i]; |
592 | bm_pool->id = i; |
593 | err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); |
594 | if (err) |
595 | goto err_unroll_pools; |
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
597 | } |
598 | return 0; |
599 | |
600 | err_unroll_pools: |
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
604 | return err; |
605 | } |
606 | |
/* Enable the PPv23 8-pool mode */
608 | static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv) |
609 | { |
610 | int val; |
611 | |
612 | val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG); |
613 | val |= MVPP23_BM_8POOL_MODE; |
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
615 | } |
616 | |
617 | /* Cleanup pool before actual initialization in the OS */ |
618 | static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id) |
619 | { |
620 | unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); |
621 | u32 val; |
622 | int i; |
623 | |
624 | /* Drain the BM from all possible residues left by firmware */ |
625 | for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++) |
626 | mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id)); |
627 | |
628 | put_cpu(); |
629 | |
630 | /* Stop the BM pool */ |
631 | val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id)); |
632 | val |= MVPP2_BM_STOP_MASK; |
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
634 | } |
635 | |
636 | static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) |
637 | { |
638 | enum dma_data_direction dma_dir = DMA_FROM_DEVICE; |
639 | int i, err, poolnum = MVPP2_BM_POOLS_NUM; |
640 | struct mvpp2_port *port; |
641 | |
642 | if (priv->percpu_pools) |
643 | poolnum = mvpp2_get_nrxqs(priv) * 2; |
644 | |
	/* Clean up the pools in case they contain stale state */
646 | for (i = 0; i < poolnum; i++) |
		mvpp2_bm_pool_cleanup(priv, i);
648 | |
649 | if (priv->percpu_pools) { |
650 | for (i = 0; i < priv->port_count; i++) { |
651 | port = priv->port_list[i]; |
652 | if (port->xdp_prog) { |
653 | dma_dir = DMA_BIDIRECTIONAL; |
654 | break; |
655 | } |
656 | } |
657 | |
658 | for (i = 0; i < poolnum; i++) { |
659 | /* the pool in use */ |
660 | int pn = i / (poolnum / 2); |
661 | |
662 | priv->page_pool[i] = |
663 | mvpp2_create_page_pool(dev, |
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
675 | } |
676 | } |
677 | } |
678 | |
	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");
681 | |
682 | for (i = 0; i < poolnum; i++) { |
683 | /* Mask BM all interrupts */ |
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
687 | } |
688 | |
689 | /* Allocate and initialize BM pools */ |
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
692 | if (!priv->bm_pools) |
693 | return -ENOMEM; |
694 | |
695 | if (priv->hw_version == MVPP23) |
696 | mvpp23_bm_set_8pool_mode(priv); |
697 | |
698 | err = mvpp2_bm_pools_init(dev, priv); |
699 | if (err < 0) |
700 | return err; |
701 | return 0; |
702 | } |
703 | |
704 | static void mvpp2_setup_bm_pool(void) |
705 | { |
706 | /* Short pool */ |
707 | mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; |
708 | mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; |
709 | |
710 | /* Long pool */ |
711 | mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; |
712 | mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; |
713 | |
714 | /* Jumbo pool */ |
715 | mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; |
716 | mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; |
717 | } |
718 | |
719 | /* Attach long pool to rxq */ |
720 | static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, |
721 | int lrxq, int long_pool) |
722 | { |
723 | u32 val, mask; |
724 | int prxq; |
725 | |
726 | /* Get queue physical ID */ |
727 | prxq = port->rxqs[lrxq]->id; |
728 | |
729 | if (port->priv->hw_version == MVPP21) |
730 | mask = MVPP21_RXQ_POOL_LONG_MASK; |
731 | else |
732 | mask = MVPP22_RXQ_POOL_LONG_MASK; |
733 | |
	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
738 | } |
739 | |
740 | /* Attach short pool to rxq */ |
741 | static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, |
742 | int lrxq, int short_pool) |
743 | { |
744 | u32 val, mask; |
745 | int prxq; |
746 | |
747 | /* Get queue physical ID */ |
748 | prxq = port->rxqs[lrxq]->id; |
749 | |
750 | if (port->priv->hw_version == MVPP21) |
751 | mask = MVPP21_RXQ_POOL_SHORT_MASK; |
752 | else |
753 | mask = MVPP22_RXQ_POOL_SHORT_MASK; |
754 | |
	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
759 | } |
760 | |
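/* Allocate one RX buffer and return both its DMA address and physical
 * address. Buffers from a page_pool are already DMA-mapped by the pool;
 * otherwise the buffer is mapped here with dma_map_single().
 */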
761 | static void *mvpp2_buf_alloc(struct mvpp2_port *port, |
762 | struct mvpp2_bm_pool *bm_pool, |
763 | struct page_pool *page_pool, |
764 | dma_addr_t *buf_dma_addr, |
765 | phys_addr_t *buf_phys_addr, |
766 | gfp_t gfp_mask) |
767 | { |
768 | dma_addr_t dma_addr; |
769 | struct page *page; |
770 | void *data; |
771 | |
	data = mvpp2_frag_alloc(bm_pool, page_pool);
773 | if (!data) |
774 | return NULL; |
775 | |
776 | if (page_pool) { |
777 | page = (struct page *)data; |
778 | dma_addr = page_pool_get_dma_addr(page); |
779 | data = page_to_virt(page); |
780 | } else { |
781 | dma_addr = dma_map_single(port->dev->dev.parent, data, |
782 | MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), |
783 | DMA_FROM_DEVICE); |
784 | if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { |
			mvpp2_frag_free(bm_pool, NULL, data);
786 | return NULL; |
787 | } |
788 | } |
789 | *buf_dma_addr = dma_addr; |
	*buf_phys_addr = virt_to_phys(data);
791 | |
792 | return data; |
793 | } |
794 | |
/* Enable flow control for the port's RXQs */
796 | static void mvpp2_rxq_enable_fc(struct mvpp2_port *port) |
797 | { |
798 | int val, cm3_state, host_id, q; |
799 | int fq = port->first_rxq; |
800 | unsigned long flags; |
801 | |
802 | spin_lock_irqsave(&port->priv->mss_spinlock, flags); |
803 | |
	/* Clear the Flow Control enable bit to prevent a race between the
	 * firmware and the kernel. If Flow Control was enabled, it is
	 * re-enabled at the end of this function.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
811 | |
812 | /* Set same Flow control for all RXQs */ |
813 | for (q = 0; q < port->nrxqs; q++) { |
814 | /* Set stop and start Flow control RXQ thresholds */ |
815 | val = MSS_THRESHOLD_START; |
816 | val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS); |
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
820 | /* Set RXQ port ID */ |
821 | val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq)); |
822 | val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq)); |
823 | val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) |
824 | + MSS_RXQ_ASS_HOSTID_OFFS)); |
825 | |
826 | /* Calculate RXQ host ID: |
827 | * In Single queue mode: Host ID equal to Host ID used for |
828 | * shared RX interrupt |
829 | * In Multi queue mode: Host ID equal to number of |
830 | * RXQ ID / number of CoS queues |
831 | * In Single resource mode: Host ID always equal to 0 |
832 | */ |
833 | if (queue_mode == MVPP2_QDIST_SINGLE_MODE) |
834 | host_id = port->nqvecs; |
835 | else if (queue_mode == MVPP2_QDIST_MULTI_MODE) |
836 | host_id = q; |
837 | else |
838 | host_id = 0; |
839 | |
840 | /* Set RXQ host ID */ |
841 | val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq) |
842 | + MSS_RXQ_ASS_HOSTID_OFFS)); |
843 | |
		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
845 | } |
846 | |
	/* Notify firmware that the Flow Control config space is ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
854 | } |
855 | |
/* Disable flow control for the port's RXQs */
857 | static void mvpp2_rxq_disable_fc(struct mvpp2_port *port) |
858 | { |
859 | int val, cm3_state, q; |
860 | unsigned long flags; |
861 | int fq = port->first_rxq; |
862 | |
863 | spin_lock_irqsave(&port->priv->mss_spinlock, flags); |
864 | |
	/* Clear the Flow Control enable bit to prevent a race between the
	 * firmware and the kernel. If Flow Control was enabled, it is
	 * re-enabled at the end of this function.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
872 | |
873 | /* Disable Flow control for all RXQs */ |
874 | for (q = 0; q < port->nrxqs; q++) { |
875 | /* Set threshold 0 to disable Flow control */ |
876 | val = 0; |
877 | val |= (0 << MSS_RXQ_TRESH_STOP_OFFS); |
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
881 | |
882 | val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq)); |
883 | |
884 | val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) |
885 | + MSS_RXQ_ASS_HOSTID_OFFS)); |
886 | |
		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
888 | } |
889 | |
	/* Notify firmware that the Flow Control config space is ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
897 | } |
898 | |
/* Enable/disable flow control for a BM pool */
900 | static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port, |
901 | struct mvpp2_bm_pool *pool, |
902 | bool en) |
903 | { |
904 | int val, cm3_state; |
905 | unsigned long flags; |
906 | |
907 | spin_lock_irqsave(&port->priv->mss_spinlock, flags); |
908 | |
	/* Clear the Flow Control enable bit to prevent a race between the
	 * firmware and the kernel. If Flow Control was enabled, it is
	 * re-enabled at the end of this function.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
916 | |
	/* Check if the BM pool should be enabled or disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
921 | val |= MSS_BUF_POOL_PORT_OFFS(port->id); |
922 | val &= ~MSS_BUF_POOL_START_MASK; |
923 | val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS); |
924 | val &= ~MSS_BUF_POOL_STOP_MASK; |
925 | val |= MSS_THRESHOLD_STOP; |
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
927 | } else { |
928 | /* Remove BM pool from the port */ |
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
930 | val &= ~MSS_BUF_POOL_PORT_OFFS(port->id); |
931 | |
932 | /* Zero BM pool start and stop thresholds to disable pool |
933 | * flow control if pool empty (not used by any port) |
934 | */ |
935 | if (!pool->buf_num) { |
936 | val &= ~MSS_BUF_POOL_START_MASK; |
937 | val &= ~MSS_BUF_POOL_STOP_MASK; |
938 | } |
939 | |
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
941 | } |
942 | |
	/* Notify firmware that the Flow Control config space is ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
950 | } |
951 | |
/* Enable/disable flow control for the BM pools on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			/* Use a separate index for the per-cpu pools so the
			 * outer port loop is not clobbered.
			 */
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}
970 | |
971 | static int mvpp2_enable_global_fc(struct mvpp2 *priv) |
972 | { |
973 | int val, timeout = 0; |
974 | |
975 | /* Enable global flow control. In this stage global |
976 | * flow control enabled, but still disabled per port. |
977 | */ |
978 | val = mvpp2_cm3_read(priv, MSS_FC_COM_REG); |
979 | val |= FLOW_CONTROL_ENABLE_BIT; |
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if firmware is running, and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
985 | |
986 | while (timeout < MSS_FC_MAX_TIMEOUT) { |
987 | val = mvpp2_cm3_read(priv, MSS_FC_COM_REG); |
988 | |
989 | if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT)) |
990 | return 0; |
		usleep_range(10, 20);
992 | timeout++; |
993 | } |
994 | |
995 | priv->global_tx_fc = false; |
996 | return -EOPNOTSUPP; |
997 | } |
998 | |
999 | /* Release buffer to BM */ |
1000 | static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, |
1001 | dma_addr_t buf_dma_addr, |
1002 | phys_addr_t buf_phys_addr) |
1003 | { |
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
1005 | unsigned long flags = 0; |
1006 | |
1007 | if (test_bit(thread, &port->priv->lock_map)) |
1008 | spin_lock_irqsave(&port->bm_lock[thread], flags); |
1009 | |
1010 | if (port->priv->hw_version >= MVPP22) { |
1011 | u32 val = 0; |
1012 | |
1013 | if (sizeof(dma_addr_t) == 8) |
1014 | val |= upper_32_bits(buf_dma_addr) & |
1015 | MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; |
1016 | |
1017 | if (sizeof(phys_addr_t) == 8) |
1018 | val |= (upper_32_bits(buf_phys_addr) |
1019 | << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & |
1020 | MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; |
1021 | |
		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
1024 | } |
1025 | |
1026 | /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply |
1027 | * returned in the "cookie" field of the RX |
1028 | * descriptor. Instead of storing the virtual address, we |
1029 | * store the physical address |
1030 | */ |
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
1035 | |
1036 | if (test_bit(thread, &port->priv->lock_map)) |
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);
1038 | |
1039 | put_cpu(); |
1040 | } |
1041 | |
1042 | /* Allocate buffers for the pool */ |
1043 | static int mvpp2_bm_bufs_add(struct mvpp2_port *port, |
1044 | struct mvpp2_bm_pool *bm_pool, int buf_num) |
1045 | { |
1046 | int i, buf_size, total_size; |
1047 | dma_addr_t dma_addr; |
1048 | phys_addr_t phys_addr; |
1049 | struct page_pool *pp = NULL; |
1050 | void *buf; |
1051 | |
1052 | if (port->priv->percpu_pools && |
1053 | bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { |
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
1056 | return 0; |
1057 | } |
1058 | |
1059 | buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); |
1060 | total_size = MVPP2_RX_TOTAL_SIZE(buf_size); |
1061 | |
1062 | if (buf_num < 0 || |
1063 | (buf_num + bm_pool->buf_num > bm_pool->size)) { |
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
1066 | buf_num, bm_pool->id); |
1067 | return 0; |
1068 | } |
1069 | |
1070 | if (port->priv->percpu_pools) |
1071 | pp = port->priv->page_pool[bm_pool->id]; |
1072 | for (i = 0; i < buf_num; i++) { |
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
1075 | if (!buf) |
1076 | break; |
1077 | |
		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
1080 | } |
1081 | |
1082 | /* Update BM driver with number of buffers added to pool */ |
1083 | bm_pool->buf_num += i; |
1084 | |
	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
1088 | |
	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
1092 | return i; |
1093 | } |
1094 | |
1095 | /* Notify the driver that BM pool is being used as specific type and return the |
1096 | * pool pointer on success |
1097 | */ |
1098 | static struct mvpp2_bm_pool * |
1099 | mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) |
1100 | { |
1101 | struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; |
1102 | int num; |
1103 | |
	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
1107 | return NULL; |
1108 | } |
1109 | |
	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
1113 | if (new_pool->pkt_size == 0) { |
1114 | int pkts_num; |
1115 | |
1116 | /* Set default buffer number or free all the buffers in case |
1117 | * the pool is not empty |
1118 | */ |
1119 | pkts_num = new_pool->buf_num; |
1120 | if (pkts_num == 0) { |
1121 | if (port->priv->percpu_pools) { |
1122 | if (pool < port->nrxqs) |
1123 | pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num; |
1124 | else |
1125 | pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num; |
1126 | } else { |
1127 | pkts_num = mvpp2_pools[pool].buf_num; |
1128 | } |
1129 | } else { |
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
1132 | } |
1133 | |
1134 | new_pool->pkt_size = pkt_size; |
1135 | new_pool->frag_size = |
1136 | SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + |
1137 | MVPP2_SKB_SHINFO_SIZE; |
1138 | |
1139 | /* Allocate buffers for this pool */ |
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
1141 | if (num != pkts_num) { |
			WARN(1, "pool %d: %d of %d allocated\n",
1143 | new_pool->id, num, pkts_num); |
1144 | return NULL; |
1145 | } |
1146 | } |
1147 | |
	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
1150 | |
1151 | return new_pool; |
1152 | } |
1153 | |
1154 | static struct mvpp2_bm_pool * |
1155 | mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, |
1156 | unsigned int pool, int pkt_size) |
1157 | { |
1158 | struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; |
1159 | int num; |
1160 | |
1161 | if (pool > port->nrxqs * 2) { |
		netdev_err(port->dev, "Invalid pool %d\n", pool);
1163 | return NULL; |
1164 | } |
1165 | |
	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
1169 | if (new_pool->pkt_size == 0) { |
1170 | int pkts_num; |
1171 | |
1172 | /* Set default buffer number or free all the buffers in case |
1173 | * the pool is not empty |
1174 | */ |
1175 | pkts_num = new_pool->buf_num; |
1176 | if (pkts_num == 0) |
1177 | pkts_num = mvpp2_pools[type].buf_num; |
1178 | else |
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
1181 | |
1182 | new_pool->pkt_size = pkt_size; |
1183 | new_pool->frag_size = |
1184 | SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + |
1185 | MVPP2_SKB_SHINFO_SIZE; |
1186 | |
1187 | /* Allocate buffers for this pool */ |
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
1189 | if (num != pkts_num) { |
			WARN(1, "pool %d: %d of %d allocated\n",
1191 | new_pool->id, num, pkts_num); |
1192 | return NULL; |
1193 | } |
1194 | } |
1195 | |
	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
1198 | |
1199 | return new_pool; |
1200 | } |
1201 | |
1202 | /* Initialize pools for swf, shared buffers variant */ |
1203 | static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) |
1204 | { |
1205 | enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; |
1206 | int rxq; |
1207 | |
1208 | /* If port pkt_size is higher than 1518B: |
1209 | * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool |
1210 | * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool |
1211 | */ |
1212 | if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { |
1213 | long_log_pool = MVPP2_BM_JUMBO; |
1214 | short_log_pool = MVPP2_BM_LONG; |
1215 | } else { |
1216 | long_log_pool = MVPP2_BM_LONG; |
1217 | short_log_pool = MVPP2_BM_SHORT; |
1218 | } |
1219 | |
1220 | if (!port->pool_long) { |
1221 | port->pool_long = |
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
1224 | if (!port->pool_long) |
1225 | return -ENOMEM; |
1226 | |
1227 | port->pool_long->port_map |= BIT(port->id); |
1228 | |
1229 | for (rxq = 0; rxq < port->nrxqs; rxq++) |
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
1231 | } |
1232 | |
1233 | if (!port->pool_short) { |
1234 | port->pool_short = |
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
1237 | if (!port->pool_short) |
1238 | return -ENOMEM; |
1239 | |
1240 | port->pool_short->port_map |= BIT(port->id); |
1241 | |
1242 | for (rxq = 0; rxq < port->nrxqs; rxq++) |
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
1245 | } |
1246 | |
1247 | return 0; |
1248 | } |
1249 | |
1250 | /* Initialize pools for swf, percpu buffers variant */ |
1251 | static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) |
1252 | { |
1253 | struct mvpp2_bm_pool *bm_pool; |
1254 | int i; |
1255 | |
1256 | for (i = 0; i < port->nrxqs; i++) { |
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
1259 | if (!bm_pool) |
1260 | return -ENOMEM; |
1261 | |
1262 | bm_pool->port_map |= BIT(port->id); |
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
1264 | } |
1265 | |
1266 | for (i = 0; i < port->nrxqs; i++) { |
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
1269 | if (!bm_pool) |
1270 | return -ENOMEM; |
1271 | |
1272 | bm_pool->port_map |= BIT(port->id); |
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
1274 | } |
1275 | |
1276 | port->pool_long = NULL; |
1277 | port->pool_short = NULL; |
1278 | |
1279 | return 0; |
1280 | } |
1281 | |
1282 | static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) |
1283 | { |
1284 | if (port->priv->percpu_pools) |
1285 | return mvpp2_swf_bm_pool_init_percpu(port); |
1286 | else |
1287 | return mvpp2_swf_bm_pool_init_shared(port); |
1288 | } |
1289 | |
1290 | static void mvpp2_set_hw_csum(struct mvpp2_port *port, |
1291 | enum mvpp2_bm_pool_log_num new_long_pool) |
1292 | { |
1293 | const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1294 | |
1295 | /* Update L4 checksum when jumbo enable/disable on port. |
1296 | * Only port 0 supports hardware checksum offload due to |
1297 | * the Tx FIFO size limitation. |
1298 | * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor |
1299 | * has 7 bits, so the maximum L3 offset is 128. |
1300 | */ |
1301 | if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { |
1302 | port->dev->features &= ~csums; |
1303 | port->dev->hw_features &= ~csums; |
1304 | } else { |
1305 | port->dev->features |= csums; |
1306 | port->dev->hw_features |= csums; |
1307 | } |
1308 | } |
1309 | |
1310 | static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) |
1311 | { |
1312 | struct mvpp2_port *port = netdev_priv(dev); |
1313 | enum mvpp2_bm_pool_log_num new_long_pool; |
1314 | int pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
1315 | |
1316 | if (port->priv->percpu_pools) |
1317 | goto out_set; |
1318 | |
1319 | /* If port MTU is higher than 1518B: |
1320 | * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool |
1321 | * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool |
1322 | */ |
1323 | if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) |
1324 | new_long_pool = MVPP2_BM_JUMBO; |
1325 | else |
1326 | new_long_pool = MVPP2_BM_LONG; |
1327 | |
1328 | if (new_long_pool != port->pool_long->id) { |
1329 | if (port->tx_fc) { |
1330 | if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) |
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
1337 | } |
1338 | |
1339 | /* Remove port from old short & long pool */ |
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
1342 | port->pool_long->port_map &= ~BIT(port->id); |
1343 | port->pool_long = NULL; |
1344 | |
		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
1347 | port->pool_short->port_map &= ~BIT(port->id); |
1348 | port->pool_short = NULL; |
1349 | |
1350 | port->pkt_size = pkt_size; |
1351 | |
1352 | /* Add port to new short & long pool */ |
1353 | mvpp2_swf_bm_pool_init(port); |
1354 | |
1355 | mvpp2_set_hw_csum(port, new_long_pool); |
1356 | |
1357 | if (port->tx_fc) { |
1358 | if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) |
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
1364 | } |
1365 | |
1366 | /* Update L4 checksum when jumbo enable/disable on port */ |
1367 | if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { |
1368 | dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
1369 | dev->hw_features &= ~(NETIF_F_IP_CSUM | |
1370 | NETIF_F_IPV6_CSUM); |
1371 | } else { |
1372 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1373 | dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1374 | } |
1375 | } |
1376 | |
1377 | out_set: |
1378 | dev->mtu = mtu; |
1379 | dev->wanted_features = dev->features; |
1380 | |
1381 | netdev_update_features(dev); |
1382 | return 0; |
1383 | } |
1384 | |
1385 | static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) |
1386 | { |
1387 | int i, sw_thread_mask = 0; |
1388 | |
1389 | for (i = 0; i < port->nqvecs; i++) |
1390 | sw_thread_mask |= port->qvecs[i].sw_thread_mask; |
1391 | |
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1393 | MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); |
1394 | } |
1395 | |
1396 | static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) |
1397 | { |
1398 | int i, sw_thread_mask = 0; |
1399 | |
1400 | for (i = 0; i < port->nqvecs; i++) |
1401 | sw_thread_mask |= port->qvecs[i].sw_thread_mask; |
1402 | |
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1404 | MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); |
1405 | } |
1406 | |
1407 | static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) |
1408 | { |
1409 | struct mvpp2_port *port = qvec->port; |
1410 | |
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1412 | MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); |
1413 | } |
1414 | |
1415 | static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) |
1416 | { |
1417 | struct mvpp2_port *port = qvec->port; |
1418 | |
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1420 | MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); |
1421 | } |
1422 | |
/* Mask the current thread's Rx/Tx interrupts.
1424 | * Called by on_each_cpu(), guaranteed to run with migration disabled, |
1425 | * using smp_processor_id() is OK. |
1426 | */ |
1427 | static void mvpp2_interrupts_mask(void *arg) |
1428 | { |
1429 | struct mvpp2_port *port = arg; |
1430 | int cpu = smp_processor_id(); |
1431 | u32 thread; |
1432 | |
1433 | /* If the thread isn't used, don't do anything */ |
	if (cpu >= port->priv->nthreads)
1435 | return; |
1436 | |
	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
1443 | } |
1444 | |
1445 | /* Unmask the current thread's Rx/Tx interrupts. |
1446 | * Called by on_each_cpu(), guaranteed to run with migration disabled, |
1447 | * using smp_processor_id() is OK. |
1448 | */ |
1449 | static void mvpp2_interrupts_unmask(void *arg) |
1450 | { |
1451 | struct mvpp2_port *port = arg; |
1452 | int cpu = smp_processor_id(); |
1453 | u32 val, thread; |
1454 | |
1455 | /* If the thread isn't used, don't do anything */ |
1456 | if (cpu >= port->priv->nthreads) |
1457 | return; |
1458 | |
	thread = mvpp2_cpu_to_thread(port->priv, cpu);
1460 | |
1461 | val = MVPP2_CAUSE_MISC_SUM_MASK | |
1462 | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); |
1463 | if (port->has_tx_irqs) |
1464 | val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
1465 | |
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
1469 | MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), |
1470 | MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK); |
1471 | } |
1472 | |
1473 | static void |
1474 | mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) |
1475 | { |
1476 | u32 val; |
1477 | int i; |
1478 | |
1479 | if (port->priv->hw_version == MVPP21) |
1480 | return; |
1481 | |
1482 | if (mask) |
1483 | val = 0; |
1484 | else |
1485 | val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); |
1486 | |
1487 | for (i = 0; i < port->nqvecs; i++) { |
1488 | struct mvpp2_queue_vector *v = port->qvecs + i; |
1489 | |
1490 | if (v->type != MVPP2_QUEUE_VECTOR_SHARED) |
1491 | continue; |
1492 | |
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
1496 | MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), |
1497 | MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK); |
1498 | } |
1499 | } |
1500 | |
1501 | /* Only GOP port 0 has an XLG MAC */ |
1502 | static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) |
1503 | { |
1504 | return port->gop_id == 0; |
1505 | } |
1506 | |
1507 | static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) |
1508 | { |
1509 | return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0); |
1510 | } |
1511 | |
1512 | /* Port configuration routines */ |
1513 | static bool mvpp2_is_xlg(phy_interface_t interface) |
1514 | { |
1515 | return interface == PHY_INTERFACE_MODE_10GBASER || |
1516 | interface == PHY_INTERFACE_MODE_5GBASER || |
1517 | interface == PHY_INTERFACE_MODE_XAUI; |
1518 | } |
1519 | |
1520 | static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set) |
1521 | { |
1522 | u32 old, val; |
1523 | |
	old = val = readl(ptr);
1525 | val &= ~mask; |
1526 | val |= set; |
1527 | if (old != val) |
		writel(val, ptr);
1529 | } |
1530 | |
1531 | static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) |
1532 | { |
1533 | struct mvpp2 *priv = port->priv; |
1534 | u32 val; |
1535 | |
	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1541 | if (port->gop_id == 2) { |
1542 | val |= GENCONF_CTRL0_PORT2_RGMII; |
1543 | } else if (port->gop_id == 3) { |
1544 | val |= GENCONF_CTRL0_PORT3_RGMII_MII; |
1545 | |
1546 | /* According to the specification, GENCONF_CTRL0_PORT3_RGMII |
1547 | * should be set to 1 for RGMII and 0 for MII. However, tests |
1548 | * show that it is the other way around. This is also what |
1549 | * U-Boot does for mvpp2, so it is assumed to be correct. |
1550 | */ |
1551 | if (port->phy_interface == PHY_INTERFACE_MODE_MII) |
1552 | val |= GENCONF_CTRL0_PORT3_RGMII; |
1553 | else |
1554 | val &= ~GENCONF_CTRL0_PORT3_RGMII; |
1555 | } |
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1557 | } |
1558 | |
1559 | static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) |
1560 | { |
1561 | struct mvpp2 *priv = port->priv; |
1562 | u32 val; |
1563 | |
	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1568 | |
1569 | if (port->gop_id > 1) { |
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1576 | } |
1577 | } |
1578 | |
1579 | static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) |
1580 | { |
1581 | struct mvpp2 *priv = port->priv; |
1582 | void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); |
1583 | void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); |
1584 | u32 val; |
1585 | |
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
1600 | } |
1601 | |
1602 | static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en) |
1603 | { |
1604 | struct mvpp2 *priv = port->priv; |
1605 | void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); |
1606 | u32 val; |
1607 | |
	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
1613 | } |
1614 | |
1615 | static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer) |
1616 | { |
1617 | struct mvpp2 *priv = port->priv; |
1618 | void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); |
1619 | u32 lsb, msb; |
1620 | |
1621 | lsb = timer & MVPP22_FCA_REG_MASK; |
1622 | msb = timer >> MVPP22_FCA_REG_SIZE; |
1623 | |
1624 | writel(val: lsb, addr: fca + MVPP22_PERIODIC_COUNTER_LSB_REG); |
1625 | writel(val: msb, addr: fca + MVPP22_PERIODIC_COUNTER_MSB_REG); |
1626 | } |
1627 | |
1628 | /* Set the Flow Control timer 100x faster than the pause quanta to ensure
1629 | * that the link partner won't send traffic while the port is in XOFF mode.
1630 | */
1631 | static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port) |
1632 | { |
1633 | u32 timer; |
1634 | |
1635 | timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) |
1636 | * FC_QUANTA; |
1637 | |
1638 | mvpp22_gop_fca_enable_periodic(port, en: false); |
1639 | |
1640 | mvpp22_gop_fca_set_timer(port, timer); |
1641 | |
1642 | mvpp22_gop_fca_enable_periodic(port, en: true); |
1643 | } |
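
/* Worked example (illustrative only; the actual FC_CLK_DIVIDER and FC_QUANTA
 * values live in mvpp2.h): assuming tclk = 250 MHz, FC_CLK_DIVIDER = 100 and
 * FC_QUANTA = 0xffff, the computation above gives
 *
 *	timer = (250000000 / (1000000 * 100)) * 0xffff
 *	      = 2 * 65535 = 131070 clock cycles
 *
 * which mvpp22_gop_fca_set_timer() then splits into LSB/MSB register halves.
 */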
1644 | |
1645 | static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface) |
1646 | { |
1647 | struct mvpp2 *priv = port->priv; |
1648 | u32 val; |
1649 | |
1650 | if (!priv->sysctrl_base) |
1651 | return 0; |
1652 | |
1653 | switch (interface) { |
1654 | case PHY_INTERFACE_MODE_MII: |
1655 | case PHY_INTERFACE_MODE_RGMII: |
1656 | case PHY_INTERFACE_MODE_RGMII_ID: |
1657 | case PHY_INTERFACE_MODE_RGMII_RXID: |
1658 | case PHY_INTERFACE_MODE_RGMII_TXID: |
1659 | if (!mvpp2_port_supports_rgmii(port)) |
1660 | goto invalid_conf; |
1661 | mvpp22_gop_init_rgmii(port); |
1662 | break; |
1663 | case PHY_INTERFACE_MODE_SGMII: |
1664 | case PHY_INTERFACE_MODE_1000BASEX: |
1665 | case PHY_INTERFACE_MODE_2500BASEX: |
1666 | mvpp22_gop_init_sgmii(port); |
1667 | break; |
1668 | case PHY_INTERFACE_MODE_5GBASER: |
1669 | case PHY_INTERFACE_MODE_10GBASER: |
1670 | if (!mvpp2_port_supports_xlg(port)) |
1671 | goto invalid_conf; |
1672 | mvpp22_gop_init_10gkr(port); |
1673 | break; |
1674 | default: |
1675 | goto unsupported_conf; |
1676 | } |
1677 | |
1678 | regmap_read(map: priv->sysctrl_base, GENCONF_PORT_CTRL1, val: &val); |
1679 | val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | |
1680 | GENCONF_PORT_CTRL1_EN(port->gop_id); |
1681 | regmap_write(map: priv->sysctrl_base, GENCONF_PORT_CTRL1, val); |
1682 | |
1683 | regmap_read(map: priv->sysctrl_base, GENCONF_PORT_CTRL0, val: &val); |
1684 | val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; |
1685 | regmap_write(map: priv->sysctrl_base, GENCONF_PORT_CTRL0, val); |
1686 | |
1687 | regmap_read(map: priv->sysctrl_base, GENCONF_SOFT_RESET1, val: &val); |
1688 | val |= GENCONF_SOFT_RESET1_GOP; |
1689 | regmap_write(map: priv->sysctrl_base, GENCONF_SOFT_RESET1, val); |
1690 | |
1691 | mvpp22_gop_fca_set_periodic_timer(port); |
1692 | |
1693 | unsupported_conf: |
1694 | return 0; |
1695 | |
1696 | invalid_conf: |
1697 | netdev_err(dev: port->dev, format: "Invalid port configuration\n");
1698 | return -EINVAL; |
1699 | } |
1700 | |
1701 | static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) |
1702 | { |
1703 | u32 val; |
1704 | |
1705 | if (phy_interface_mode_is_rgmii(mode: port->phy_interface) || |
1706 | phy_interface_mode_is_8023z(mode: port->phy_interface) || |
1707 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
1708 | /* Enable the GMAC link status irq for this port */ |
1709 | val = readl(addr: port->base + MVPP22_GMAC_INT_SUM_MASK); |
1710 | val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; |
1711 | writel(val, addr: port->base + MVPP22_GMAC_INT_SUM_MASK); |
1712 | } |
1713 | |
1714 | if (mvpp2_port_supports_xlg(port)) { |
1715 | /* Enable the XLG/GIG irqs for this port */ |
1716 | val = readl(addr: port->base + MVPP22_XLG_EXT_INT_MASK); |
1717 | if (mvpp2_is_xlg(interface: port->phy_interface)) |
1718 | val |= MVPP22_XLG_EXT_INT_MASK_XLG; |
1719 | else |
1720 | val |= MVPP22_XLG_EXT_INT_MASK_GIG; |
1721 | writel(val, addr: port->base + MVPP22_XLG_EXT_INT_MASK); |
1722 | } |
1723 | } |
1724 | |
1725 | static void mvpp22_gop_mask_irq(struct mvpp2_port *port) |
1726 | { |
1727 | u32 val; |
1728 | |
1729 | if (mvpp2_port_supports_xlg(port)) { |
1730 | val = readl(addr: port->base + MVPP22_XLG_EXT_INT_MASK); |
1731 | val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | |
1732 | MVPP22_XLG_EXT_INT_MASK_GIG); |
1733 | writel(val, addr: port->base + MVPP22_XLG_EXT_INT_MASK); |
1734 | } |
1735 | |
1736 | if (phy_interface_mode_is_rgmii(mode: port->phy_interface) || |
1737 | phy_interface_mode_is_8023z(mode: port->phy_interface) || |
1738 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
1739 | val = readl(addr: port->base + MVPP22_GMAC_INT_SUM_MASK); |
1740 | val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; |
1741 | writel(val, addr: port->base + MVPP22_GMAC_INT_SUM_MASK); |
1742 | } |
1743 | } |
1744 | |
1745 | static void mvpp22_gop_setup_irq(struct mvpp2_port *port) |
1746 | { |
1747 | u32 val; |
1748 | |
1749 | mvpp2_modify(ptr: port->base + MVPP22_GMAC_INT_SUM_MASK, |
1750 | MVPP22_GMAC_INT_SUM_MASK_PTP, |
1751 | MVPP22_GMAC_INT_SUM_MASK_PTP); |
1752 | |
1753 | if (port->phylink || |
1754 | phy_interface_mode_is_rgmii(mode: port->phy_interface) || |
1755 | phy_interface_mode_is_8023z(mode: port->phy_interface) || |
1756 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
1757 | val = readl(addr: port->base + MVPP22_GMAC_INT_MASK); |
1758 | val |= MVPP22_GMAC_INT_MASK_LINK_STAT; |
1759 | writel(val, addr: port->base + MVPP22_GMAC_INT_MASK); |
1760 | } |
1761 | |
1762 | if (mvpp2_port_supports_xlg(port)) { |
1763 | val = readl(addr: port->base + MVPP22_XLG_INT_MASK); |
1764 | val |= MVPP22_XLG_INT_MASK_LINK; |
1765 | writel(val, addr: port->base + MVPP22_XLG_INT_MASK); |
1766 | |
1767 | mvpp2_modify(ptr: port->base + MVPP22_XLG_EXT_INT_MASK, |
1768 | MVPP22_XLG_EXT_INT_MASK_PTP, |
1769 | MVPP22_XLG_EXT_INT_MASK_PTP); |
1770 | } |
1771 | |
1772 | mvpp22_gop_unmask_irq(port); |
1773 | } |
1774 | |
1775 | /* Sets the PHY mode of the COMPHY (which configures the serdes lanes). |
1776 | * |
1777 | * The PHY mode used by the PPv2 driver comes from the network subsystem, while |
1778 | * the one given to the COMPHY comes from the generic PHY subsystem. Hence they |
1779 | * differ. |
1780 | * |
1781 | * The COMPHY configures the serdes lanes regardless of the actual use of the |
1782 | * lanes by the physical layer. This is why configurations like |
1783 | * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. |
1784 | */ |
1785 | static int mvpp22_comphy_init(struct mvpp2_port *port, |
1786 | phy_interface_t interface) |
1787 | { |
1788 | int ret; |
1789 | |
1790 | if (!port->comphy) |
1791 | return 0; |
1792 | |
1793 | ret = phy_set_mode_ext(phy: port->comphy, mode: PHY_MODE_ETHERNET, submode: interface); |
1794 | if (ret) |
1795 | return ret; |
1796 | |
1797 | return phy_power_on(phy: port->comphy); |
1798 | } |
1799 | |
1800 | static void mvpp2_port_enable(struct mvpp2_port *port) |
1801 | { |
1802 | u32 val; |
1803 | |
1804 | if (mvpp2_port_supports_xlg(port) && |
1805 | mvpp2_is_xlg(interface: port->phy_interface)) { |
1806 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG); |
1807 | val |= MVPP22_XLG_CTRL0_PORT_EN; |
1808 | val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; |
1809 | writel(val, addr: port->base + MVPP22_XLG_CTRL0_REG); |
1810 | } else { |
1811 | val = readl(addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
1812 | val |= MVPP2_GMAC_PORT_EN_MASK; |
1813 | val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; |
1814 | writel(val, addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
1815 | } |
1816 | } |
1817 | |
1818 | static void mvpp2_port_disable(struct mvpp2_port *port) |
1819 | { |
1820 | u32 val; |
1821 | |
1822 | if (mvpp2_port_supports_xlg(port) && |
1823 | mvpp2_is_xlg(interface: port->phy_interface)) { |
1824 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG); |
1825 | val &= ~MVPP22_XLG_CTRL0_PORT_EN; |
1826 | writel(val, addr: port->base + MVPP22_XLG_CTRL0_REG); |
1827 | } |
1828 | |
1829 | val = readl(addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
1830 | val &= ~(MVPP2_GMAC_PORT_EN_MASK); |
1831 | writel(val, addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
1832 | } |
1833 | |
1834 | /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ |
1835 | static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) |
1836 | { |
1837 | u32 val; |
1838 | |
1839 | val = readl(addr: port->base + MVPP2_GMAC_CTRL_1_REG) & |
1840 | ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; |
1841 | writel(val, addr: port->base + MVPP2_GMAC_CTRL_1_REG); |
1842 | } |
1843 | |
1844 | /* Configure loopback port */ |
1845 | static void mvpp2_port_loopback_set(struct mvpp2_port *port, |
1846 | const struct phylink_link_state *state) |
1847 | { |
1848 | u32 val; |
1849 | |
1850 | val = readl(addr: port->base + MVPP2_GMAC_CTRL_1_REG); |
1851 | |
1852 | if (state->speed == 1000) |
1853 | val |= MVPP2_GMAC_GMII_LB_EN_MASK; |
1854 | else |
1855 | val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; |
1856 | |
1857 | if (phy_interface_mode_is_8023z(mode: state->interface) || |
1858 | state->interface == PHY_INTERFACE_MODE_SGMII) |
1859 | val |= MVPP2_GMAC_PCS_LB_EN_MASK; |
1860 | else |
1861 | val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; |
1862 | |
1863 | writel(val, addr: port->base + MVPP2_GMAC_CTRL_1_REG); |
1864 | } |
1865 | |
1866 | enum { |
1867 | ETHTOOL_XDP_REDIRECT, |
1868 | ETHTOOL_XDP_PASS, |
1869 | ETHTOOL_XDP_DROP, |
1870 | ETHTOOL_XDP_TX, |
1871 | ETHTOOL_XDP_TX_ERR, |
1872 | ETHTOOL_XDP_XMIT, |
1873 | ETHTOOL_XDP_XMIT_ERR, |
1874 | }; |
1875 | |
1876 | struct mvpp2_ethtool_counter { |
1877 | unsigned int offset; |
1878 | const char string[ETH_GSTRING_LEN]; |
1879 | bool reg_is_64b; |
1880 | }; |
1881 | |
1882 | static u64 mvpp2_read_count(struct mvpp2_port *port, |
1883 | const struct mvpp2_ethtool_counter *counter) |
1884 | { |
1885 | u64 val; |
1886 | |
1887 | val = readl(addr: port->stats_base + counter->offset); |
1888 | if (counter->reg_is_64b) |
1889 | val += (u64)readl(addr: port->stats_base + counter->offset + 4) << 32; |
1890 | |
1891 | return val; |
1892 | } |
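
/* Illustrative composition of a 64-bit counter from its two 32-bit register
 * halves (low word first, then high word shifted up):
 *
 *	low  = 0x00000010, high = 0x00000002
 *	val  = 0x00000010 + ((u64)0x00000002 << 32) = 0x0000000200000010
 *
 * mvpp2_read_stats() accumulates these reads with "+=", which suggests the
 * MIB counters are clear-on-read and must be summed by software over time.
 */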
1893 | |
1894 | /* Some counters are accessed indirectly by first writing an index to
1895 | * MVPP2_CTRS_IDX. The index can represent various resources depending on the
1896 | * register being accessed: it can be a hit counter for some classification
1897 | * tables, or a counter specific to a rxq, a txq or a buffer pool.
1898 | */
1899 | static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg) |
1900 | { |
1901 | mvpp2_write(priv, MVPP2_CTRS_IDX, data: index); |
1902 | return mvpp2_read(priv, offset: reg); |
1903 | } |
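
/* Usage sketch, mirroring mvpp2_read_stats() below: to read a per-rxq drop
 * counter, the rxq index (including the port's first_rxq offset) is written
 * to MVPP2_CTRS_IDX before reading the counter register:
 *
 *	u32 drops = mvpp2_read_index(priv, port->first_rxq + q,
 *				     MVPP2_RX_PKTS_BM_DROP_CTR);
 */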
1904 | |
1905 | /* Software statistics and hardware statistics are, by design, incremented
1906 | * at different points in the packet processing chain, so it is very likely
1907 | * that incoming packets are dropped after being counted by hardware but
1908 | * before reaching the software statistics (most probably multicast
1909 | * packets). In the opposite direction, during transmission, FCS bytes are
1910 | * added in between, and TSO skbs are split with header bytes added.
1911 | * Hence, statistics gathered from userspace with ifconfig (software) and
1912 | * ethtool (hardware) cannot be compared.
1913 | */
1914 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = { |
1915 | { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1916 | { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, |
1917 | { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, |
1918 | { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, |
1919 | { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, |
1920 | { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, |
1921 | { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, |
1922 | { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, |
1923 | { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, |
1924 | { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, |
1925 | { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, |
1926 | { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, |
1927 | { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1928 | { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, |
1929 | { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, |
1930 | { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, |
1931 | { MVPP2_MIB_FC_SENT, "fc_sent" }, |
1932 | { MVPP2_MIB_FC_RCVD, "fc_received" }, |
1933 | { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, |
1934 | { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, |
1935 | { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, |
1936 | { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, |
1937 | { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, |
1938 | { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, |
1939 | { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, |
1940 | { MVPP2_MIB_COLLISION, "collision" }, |
1941 | { MVPP2_MIB_LATE_COLLISION, "late_collision" }, |
1942 | }; |
1943 | |
1944 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = { |
1945 | { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" }, |
1946 | { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" }, |
1947 | }; |
1948 | |
1949 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = { |
1950 | { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" }, |
1951 | { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" }, |
1952 | { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
1953 | { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" }, |
1954 | { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" }, |
1955 | { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" }, |
1956 | { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" }, |
1957 | { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" }, |
1958 | { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" }, |
1959 | }; |
1960 | |
1961 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { |
1962 | { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" }, |
1963 | { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, |
1964 | { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, |
1965 | { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, |
1966 | }; |
1967 | |
1968 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { |
1969 | { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1970 | { ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1971 | { ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1972 | { ETHTOOL_XDP_TX, "rx_xdp_tx", },
1973 | { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1974 | { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1975 | { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1976 | }; |
1977 | |
1978 | #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ |
1979 | ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ |
1980 | (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ |
1981 | (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ |
1982 | ARRAY_SIZE(mvpp2_ethtool_xdp)) |
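
/* Worked example: with the arrays as defined above (27 MIB, 2 per-port,
 * 9 per-txq, 4 per-rxq and 7 XDP counters), a port with ntxqs = 8 and
 * nrxqs = 4 exposes
 *
 *	27 + 2 + (9 * 8) + (4 * 4) + 7 = 124
 *
 * ethtool statistics, which is also the value returned by
 * mvpp2_ethtool_get_sset_count() for ETH_SS_STATS.
 */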
1983 | |
1984 | static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, |
1985 | u8 *data) |
1986 | { |
1987 | struct mvpp2_port *port = netdev_priv(dev: netdev); |
1988 | int i, q; |
1989 | |
1990 | if (sset != ETH_SS_STATS) |
1991 | return; |
1992 | |
1993 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { |
1994 | strscpy(data, mvpp2_ethtool_mib_regs[i].string, |
1995 | ETH_GSTRING_LEN); |
1996 | data += ETH_GSTRING_LEN; |
1997 | } |
1998 | |
1999 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { |
2000 | strscpy(data, mvpp2_ethtool_port_regs[i].string, |
2001 | ETH_GSTRING_LEN); |
2002 | data += ETH_GSTRING_LEN; |
2003 | } |
2004 | |
2005 | for (q = 0; q < port->ntxqs; q++) { |
2006 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { |
2007 | snprintf(buf: data, ETH_GSTRING_LEN, |
2008 | fmt: mvpp2_ethtool_txq_regs[i].string, q); |
2009 | data += ETH_GSTRING_LEN; |
2010 | } |
2011 | } |
2012 | |
2013 | for (q = 0; q < port->nrxqs; q++) { |
2014 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { |
2015 | snprintf(buf: data, ETH_GSTRING_LEN, |
2016 | fmt: mvpp2_ethtool_rxq_regs[i].string, |
2017 | q); |
2018 | data += ETH_GSTRING_LEN; |
2019 | } |
2020 | } |
2021 | |
2022 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { |
2023 | strscpy(data, mvpp2_ethtool_xdp[i].string, |
2024 | ETH_GSTRING_LEN); |
2025 | data += ETH_GSTRING_LEN; |
2026 | } |
2027 | } |
2028 | |
2029 | static void |
2030 | mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) |
2031 | { |
2032 | unsigned int start; |
2033 | unsigned int cpu; |
2034 | |
2035 | /* Gather XDP Statistics */ |
2036 | for_each_possible_cpu(cpu) { |
2037 | struct mvpp2_pcpu_stats *cpu_stats; |
2038 | u64 xdp_redirect; |
2039 | u64 xdp_pass; |
2040 | u64 xdp_drop; |
2041 | u64 xdp_xmit; |
2042 | u64 xdp_xmit_err; |
2043 | u64 xdp_tx; |
2044 | u64 xdp_tx_err; |
2045 | |
2046 | cpu_stats = per_cpu_ptr(port->stats, cpu); |
2047 | do { |
2048 | start = u64_stats_fetch_begin(syncp: &cpu_stats->syncp); |
2049 | xdp_redirect = cpu_stats->xdp_redirect; |
2050 | xdp_pass = cpu_stats->xdp_pass; |
2051 | xdp_drop = cpu_stats->xdp_drop; |
2052 | xdp_xmit = cpu_stats->xdp_xmit; |
2053 | xdp_xmit_err = cpu_stats->xdp_xmit_err; |
2054 | xdp_tx = cpu_stats->xdp_tx; |
2055 | xdp_tx_err = cpu_stats->xdp_tx_err; |
2056 | } while (u64_stats_fetch_retry(syncp: &cpu_stats->syncp, start)); |
2057 | |
2058 | xdp_stats->xdp_redirect += xdp_redirect; |
2059 | xdp_stats->xdp_pass += xdp_pass; |
2060 | xdp_stats->xdp_drop += xdp_drop; |
2061 | xdp_stats->xdp_xmit += xdp_xmit; |
2062 | xdp_stats->xdp_xmit_err += xdp_xmit_err; |
2063 | xdp_stats->xdp_tx += xdp_tx; |
2064 | xdp_stats->xdp_tx_err += xdp_tx_err; |
2065 | } |
2066 | } |
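
/* For reference, the writer side of this u64_stats seqcount pattern (used
 * where the datapath updates the per-cpu counters elsewhere in this driver)
 * is expected to look like:
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->xdp_pass++;
 *	u64_stats_update_end(&stats->syncp);
 *
 * so that 32-bit readers retry instead of observing torn 64-bit values.
 */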
2067 | |
2068 | static void mvpp2_read_stats(struct mvpp2_port *port) |
2069 | { |
2070 | struct mvpp2_pcpu_stats xdp_stats = {}; |
2071 | const struct mvpp2_ethtool_counter *s; |
2072 | u64 *pstats; |
2073 | int i, q; |
2074 | |
2075 | pstats = port->ethtool_stats; |
2076 | |
2077 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) |
2078 | *pstats++ += mvpp2_read_count(port, counter: &mvpp2_ethtool_mib_regs[i]); |
2079 | |
2080 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) |
2081 | *pstats++ += mvpp2_read(priv: port->priv, |
2082 | offset: mvpp2_ethtool_port_regs[i].offset + |
2083 | 4 * port->id); |
2084 | |
2085 | for (q = 0; q < port->ntxqs; q++) |
2086 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) |
2087 | *pstats++ += mvpp2_read_index(priv: port->priv, |
2088 | MVPP22_CTRS_TX_CTR(port->id, q), |
2089 | reg: mvpp2_ethtool_txq_regs[i].offset); |
2090 | |
2091 | /* Rxqs are numbered from 0 from the user standpoint, but not from the |
2092 | * driver's. We need to add the port->first_rxq offset. |
2093 | */ |
2094 | for (q = 0; q < port->nrxqs; q++) |
2095 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) |
2096 | *pstats++ += mvpp2_read_index(priv: port->priv, |
2097 | index: port->first_rxq + q, |
2098 | reg: mvpp2_ethtool_rxq_regs[i].offset); |
2099 | |
2100 | /* Gather XDP Statistics */ |
2101 | mvpp2_get_xdp_stats(port, xdp_stats: &xdp_stats); |
2102 | |
2103 | for (i = 0, s = mvpp2_ethtool_xdp; |
2104 | s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); |
2105 | s++, i++) { |
2106 | switch (s->offset) { |
2107 | case ETHTOOL_XDP_REDIRECT: |
2108 | *pstats++ = xdp_stats.xdp_redirect; |
2109 | break; |
2110 | case ETHTOOL_XDP_PASS: |
2111 | *pstats++ = xdp_stats.xdp_pass; |
2112 | break; |
2113 | case ETHTOOL_XDP_DROP: |
2114 | *pstats++ = xdp_stats.xdp_drop; |
2115 | break; |
2116 | case ETHTOOL_XDP_TX: |
2117 | *pstats++ = xdp_stats.xdp_tx; |
2118 | break; |
2119 | case ETHTOOL_XDP_TX_ERR: |
2120 | *pstats++ = xdp_stats.xdp_tx_err; |
2121 | break; |
2122 | case ETHTOOL_XDP_XMIT: |
2123 | *pstats++ = xdp_stats.xdp_xmit; |
2124 | break; |
2125 | case ETHTOOL_XDP_XMIT_ERR: |
2126 | *pstats++ = xdp_stats.xdp_xmit_err; |
2127 | break; |
2128 | } |
2129 | } |
2130 | } |
2131 | |
2132 | static void mvpp2_gather_hw_statistics(struct work_struct *work) |
2133 | { |
2134 | struct delayed_work *del_work = to_delayed_work(work); |
2135 | struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, |
2136 | stats_work); |
2137 | |
2138 | mutex_lock(&port->gather_stats_lock); |
2139 | |
2140 | mvpp2_read_stats(port); |
2141 | |
2142 | /* There is no need to read the counters again right after this function
2143 | * returns if it was called asynchronously by the user (i.e. via ethtool).
2144 | */
2145 | cancel_delayed_work(dwork: &port->stats_work); |
2146 | queue_delayed_work(wq: port->priv->stats_queue, dwork: &port->stats_work, |
2147 | MVPP2_MIB_COUNTERS_STATS_DELAY); |
2148 | |
2149 | mutex_unlock(lock: &port->gather_stats_lock); |
2150 | } |
2151 | |
2152 | static void mvpp2_ethtool_get_stats(struct net_device *dev, |
2153 | struct ethtool_stats *stats, u64 *data) |
2154 | { |
2155 | struct mvpp2_port *port = netdev_priv(dev); |
2156 | |
2157 | /* Update statistics for the given port, then take the lock to avoid
2158 | * concurrent accesses to the ethtool_stats structure while it is copied.
2159 | */
2160 | mvpp2_gather_hw_statistics(work: &port->stats_work.work); |
2161 | |
2162 | mutex_lock(&port->gather_stats_lock); |
2163 | memcpy(data, port->ethtool_stats, |
2164 | sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); |
2165 | mutex_unlock(lock: &port->gather_stats_lock); |
2166 | } |
2167 | |
2168 | static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) |
2169 | { |
2170 | struct mvpp2_port *port = netdev_priv(dev); |
2171 | |
2172 | if (sset == ETH_SS_STATS) |
2173 | return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); |
2174 | |
2175 | return -EOPNOTSUPP; |
2176 | } |
2177 | |
2178 | static void mvpp2_mac_reset_assert(struct mvpp2_port *port) |
2179 | { |
2180 | u32 val; |
2181 | |
2182 | val = readl(addr: port->base + MVPP2_GMAC_CTRL_2_REG) | |
2183 | MVPP2_GMAC_PORT_RESET_MASK; |
2184 | writel(val, addr: port->base + MVPP2_GMAC_CTRL_2_REG); |
2185 | |
2186 | if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { |
2187 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG) & |
2188 | ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; |
2189 | writel(val, addr: port->base + MVPP22_XLG_CTRL0_REG); |
2190 | } |
2191 | } |
2192 | |
2193 | static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) |
2194 | { |
2195 | struct mvpp2 *priv = port->priv; |
2196 | void __iomem *mpcs, *xpcs; |
2197 | u32 val; |
2198 | |
2199 | if (port->priv->hw_version == MVPP21 || port->gop_id != 0) |
2200 | return; |
2201 | |
2202 | mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); |
2203 | xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); |
2204 | |
2205 | val = readl(addr: mpcs + MVPP22_MPCS_CLK_RESET); |
2206 | val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); |
2207 | val |= MVPP22_MPCS_CLK_RESET_DIV_SET; |
2208 | writel(val, addr: mpcs + MVPP22_MPCS_CLK_RESET); |
2209 | |
2210 | val = readl(addr: xpcs + MVPP22_XPCS_CFG0); |
2211 | writel(val: val & ~MVPP22_XPCS_CFG0_RESET_DIS, addr: xpcs + MVPP22_XPCS_CFG0); |
2212 | } |
2213 | |
2214 | static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, |
2215 | phy_interface_t interface) |
2216 | { |
2217 | struct mvpp2 *priv = port->priv; |
2218 | void __iomem *mpcs, *xpcs; |
2219 | u32 val; |
2220 | |
2221 | if (port->priv->hw_version == MVPP21 || port->gop_id != 0) |
2222 | return; |
2223 | |
2224 | mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); |
2225 | xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); |
2226 | |
2227 | switch (interface) { |
2228 | case PHY_INTERFACE_MODE_5GBASER: |
2229 | case PHY_INTERFACE_MODE_10GBASER: |
2230 | val = readl(addr: mpcs + MVPP22_MPCS_CLK_RESET); |
2231 | val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | |
2232 | MAC_CLK_RESET_SD_TX; |
2233 | val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; |
2234 | writel(val, addr: mpcs + MVPP22_MPCS_CLK_RESET); |
2235 | break; |
2236 | case PHY_INTERFACE_MODE_XAUI: |
2237 | case PHY_INTERFACE_MODE_RXAUI: |
2238 | val = readl(addr: xpcs + MVPP22_XPCS_CFG0); |
2239 | writel(val: val | MVPP22_XPCS_CFG0_RESET_DIS, addr: xpcs + MVPP22_XPCS_CFG0); |
2240 | break; |
2241 | default: |
2242 | break; |
2243 | } |
2244 | } |
2245 | |
2246 | /* Change maximum receive size of the port */ |
2247 | static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) |
2248 | { |
2249 | u32 val; |
2250 | |
2251 | val = readl(addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
2252 | val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; |
2253 | val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << |
2254 | MVPP2_GMAC_MAX_RX_SIZE_OFFS); |
2255 | writel(val, addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
2256 | } |
2257 | |
2258 | /* Change maximum receive size of the port */ |
2259 | static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) |
2260 | { |
2261 | u32 val; |
2262 | |
2263 | val = readl(addr: port->base + MVPP22_XLG_CTRL1_REG); |
2264 | val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; |
2265 | val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << |
2266 | MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; |
2267 | writel(val, addr: port->base + MVPP22_XLG_CTRL1_REG); |
2268 | } |
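
/* In both helpers above the hardware field holds the maximum RX size in
 * units of 2 bytes, hence the division. Illustrative arithmetic, assuming
 * MVPP2_MH_SIZE is the 2-byte Marvell header: for pkt_size = 1518 + 2,
 *
 *	field = (1520 - 2) / 2 = 759
 *
 * is what gets programmed into the register.
 */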
2269 | |
2270 | /* Set defaults to the MVPP2 port */ |
2271 | static void mvpp2_defaults_set(struct mvpp2_port *port) |
2272 | { |
2273 | int tx_port_num, val, queue, lrxq; |
2274 | |
2275 | if (port->priv->hw_version == MVPP21) { |
2276 | /* Update TX FIFO MIN Threshold */ |
2277 | val = readl(addr: port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); |
2278 | val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; |
2279 | /* Min. TX threshold must be less than minimal packet length */ |
2280 | val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); |
2281 | writel(val, addr: port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); |
2282 | } |
2283 | |
2284 | /* Disable Legacy WRR, Disable EJP, Release from reset */ |
2285 | tx_port_num = mvpp2_egress_port(port); |
2286 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, |
2287 | data: tx_port_num); |
2288 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_CMD_1_REG, data: 0); |
2289 | |
2290 | /* Set TXQ scheduling to Round-Robin */ |
2291 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, data: 0); |
2292 | |
2293 | /* Close bandwidth for all queues */ |
2294 | for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) |
2295 | mvpp2_write(priv: port->priv, |
2296 | MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), data: 0); |
2297 | |
2298 | /* Set refill period to 1 usec, refill tokens |
2299 | * and bucket size to maximum |
2300 | */ |
2301 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_PERIOD_REG, |
2302 | data: port->priv->tclk / USEC_PER_SEC); |
2303 | val = mvpp2_read(priv: port->priv, MVPP2_TXP_SCHED_REFILL_REG); |
2304 | val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; |
2305 | val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); |
2306 | val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; |
2307 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_REFILL_REG, data: val); |
2308 | val = MVPP2_TXP_TOKEN_SIZE_MAX; |
2309 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, data: val); |
2310 | |
2311 | /* Set MaximumLowLatencyPacketSize value to 256 */ |
2312 | mvpp2_write(priv: port->priv, MVPP2_RX_CTRL_REG(port->id), |
2313 | MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | |
2314 | MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); |
2315 | |
2316 | /* Enable Rx cache snoop */ |
2317 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
2318 | queue = port->rxqs[lrxq]->id; |
2319 | val = mvpp2_read(priv: port->priv, MVPP2_RXQ_CONFIG_REG(queue)); |
2320 | val |= MVPP2_SNOOP_PKT_SIZE_MASK | |
2321 | MVPP2_SNOOP_BUF_HDR_MASK; |
2322 | mvpp2_write(priv: port->priv, MVPP2_RXQ_CONFIG_REG(queue), data: val); |
2323 | } |
2324 | |
2325 | /* By default, mask all interrupts on all present CPUs */
2326 | mvpp2_interrupts_disable(port); |
2327 | } |
2328 | |
2329 | /* Enable/disable receiving packets */ |
2330 | static void mvpp2_ingress_enable(struct mvpp2_port *port) |
2331 | { |
2332 | u32 val; |
2333 | int lrxq, queue; |
2334 | |
2335 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
2336 | queue = port->rxqs[lrxq]->id; |
2337 | val = mvpp2_read(priv: port->priv, MVPP2_RXQ_CONFIG_REG(queue)); |
2338 | val &= ~MVPP2_RXQ_DISABLE_MASK; |
2339 | mvpp2_write(priv: port->priv, MVPP2_RXQ_CONFIG_REG(queue), data: val); |
2340 | } |
2341 | } |
2342 | |
2343 | static void mvpp2_ingress_disable(struct mvpp2_port *port) |
2344 | { |
2345 | u32 val; |
2346 | int lrxq, queue; |
2347 | |
2348 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
2349 | queue = port->rxqs[lrxq]->id; |
2350 | val = mvpp2_read(priv: port->priv, MVPP2_RXQ_CONFIG_REG(queue)); |
2351 | val |= MVPP2_RXQ_DISABLE_MASK; |
2352 | mvpp2_write(priv: port->priv, MVPP2_RXQ_CONFIG_REG(queue), data: val); |
2353 | } |
2354 | } |
2355 | |
2356 | /* Enable transmit via physical egress queue
2357 | * - HW starts taking descriptors from DRAM
2358 | */
2359 | static void mvpp2_egress_enable(struct mvpp2_port *port) |
2360 | { |
2361 | u32 qmap; |
2362 | int queue; |
2363 | int tx_port_num = mvpp2_egress_port(port); |
2364 | |
2365 | /* Enable all initialized TXQs */
2366 | qmap = 0; |
2367 | for (queue = 0; queue < port->ntxqs; queue++) { |
2368 | struct mvpp2_tx_queue *txq = port->txqs[queue]; |
2369 | |
2370 | if (txq->descs) |
2371 | qmap |= (1 << queue); |
2372 | } |
2373 | |
2374 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, data: tx_port_num); |
2375 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, data: qmap); |
2376 | } |
2377 | |
2378 | /* Disable transmit via physical egress queue |
2379 | * - HW doesn't take descriptors from DRAM |
2380 | */ |
2381 | static void mvpp2_egress_disable(struct mvpp2_port *port) |
2382 | { |
2383 | u32 reg_data; |
2384 | int delay; |
2385 | int tx_port_num = mvpp2_egress_port(port); |
2386 | |
2387 | /* Issue stop command for active channels only */ |
2388 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, data: tx_port_num); |
2389 | reg_data = (mvpp2_read(priv: port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & |
2390 | MVPP2_TXP_SCHED_ENQ_MASK; |
2391 | if (reg_data != 0) |
2392 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, |
2393 | data: (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); |
2394 | |
2395 | /* Wait for all Tx activity to terminate. */ |
2396 | delay = 0; |
2397 | do { |
2398 | if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { |
2399 | netdev_warn(dev: port->dev, |
2400 | format: "Tx stop timed out, status=0x%08x\n",
2401 | reg_data); |
2402 | break; |
2403 | } |
2404 | mdelay(1); |
2405 | delay++; |
2406 | |
2407 | /* Check the port TX Command register to verify that all
2408 | * Tx queues are stopped
2409 | */
2410 | reg_data = mvpp2_read(priv: port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); |
2411 | } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); |
2412 | } |
2413 | |
2414 | /* Rx descriptors helper methods */ |
2415 | |
2416 | /* Get number of Rx descriptors occupied by received packets */ |
2417 | static inline int |
2418 | mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) |
2419 | { |
2420 | u32 val = mvpp2_read(priv: port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); |
2421 | |
2422 | return val & MVPP2_RXQ_OCCUPIED_MASK; |
2423 | } |
2424 | |
2425 | /* Update Rx queue status with the number of occupied and available |
2426 | * Rx descriptor slots. |
2427 | */ |
2428 | static inline void |
2429 | mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, |
2430 | int used_count, int free_count) |
2431 | { |
2432 | /* Decrement the number of used descriptors and increment the
2433 | * number of free descriptors.
2434 | */
2435 | u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); |
2436 | |
2437 | mvpp2_write(priv: port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), data: val); |
2438 | } |
2439 | |
2440 | /* Get pointer to next RX descriptor to be processed by SW */ |
2441 | static inline struct mvpp2_rx_desc * |
2442 | mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) |
2443 | { |
2444 | int rx_desc = rxq->next_desc_to_proc; |
2445 | |
2446 | rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); |
2447 | prefetch(rxq->descs + rxq->next_desc_to_proc); |
2448 | return rxq->descs + rx_desc; |
2449 | } |
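
/* Illustrative ring-walk, assuming MVPP2_QUEUE_NEXT_DESC() wraps as
 * "(index < last_desc) ? index + 1 : 0" (last_desc is set to size - 1 at
 * queue init): with size = 4, next_desc_to_proc advances 0 -> 1 -> 2 -> 3
 * -> 0, and each call returns the descriptor that was current before the
 * advance, prefetching the next one.
 */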
2450 | |
2451 | /* Set rx queue offset */ |
2452 | static void mvpp2_rxq_offset_set(struct mvpp2_port *port, |
2453 | int prxq, int offset) |
2454 | { |
2455 | u32 val; |
2456 | |
2457 | /* Convert offset from bytes to units of 32 bytes */ |
2458 | offset = offset >> 5; |
2459 | |
2460 | val = mvpp2_read(priv: port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); |
2461 | val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; |
2462 | |
2463 | /* Offset is in units of 32 bytes */
2464 | val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & |
2465 | MVPP2_RXQ_PACKET_OFFSET_MASK); |
2466 | |
2467 | mvpp2_write(priv: port->priv, MVPP2_RXQ_CONFIG_REG(prxq), data: val); |
2468 | } |
2469 | |
2470 | /* Tx descriptors helper methods */ |
2471 | |
2472 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ |
2473 | static struct mvpp2_tx_desc * |
2474 | mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) |
2475 | { |
2476 | int tx_desc = txq->next_desc_to_proc; |
2477 | |
2478 | txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); |
2479 | return txq->descs + tx_desc; |
2480 | } |
2481 | |
2482 | /* Update HW with number of aggregated Tx descriptors to be sent |
2483 | * |
2484 | * Called only from mvpp2_tx(), so migration is disabled, using |
2485 | * smp_processor_id() is OK. |
2486 | */ |
2487 | static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) |
2488 | { |
2489 | /* Aggregated access: the relevant TXQ number is written in the TX desc */
2490 | mvpp2_thread_write(priv: port->priv, |
2491 | thread: mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()), |
2492 | MVPP2_AGGR_TXQ_UPDATE_REG, data: pending); |
2493 | } |
2494 | |
2495 | /* Check if there are enough free descriptors in aggregated txq. |
2496 | * If not, update the number of occupied descriptors and repeat the check. |
2497 | * |
2498 | * Called only from mvpp2_tx(), so migration is disabled, using |
2499 | * smp_processor_id() is OK. |
2500 | */ |
2501 | static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, |
2502 | struct mvpp2_tx_queue *aggr_txq, int num) |
2503 | { |
2504 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { |
2505 | /* Update number of occupied aggregated Tx descriptors */ |
2506 | unsigned int thread = |
2507 | mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
2508 | u32 val = mvpp2_read_relaxed(priv: port->priv, |
2509 | MVPP2_AGGR_TXQ_STATUS_REG(thread)); |
2510 | |
2511 | aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; |
2512 | |
2513 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) |
2514 | return -ENOMEM; |
2515 | } |
2516 | return 0; |
2517 | } |
2518 | |
2519 | /* Reserved Tx descriptors allocation request |
2520 | * |
2521 | * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called |
2522 | * only by mvpp2_tx(), so migration is disabled, using |
2523 | * smp_processor_id() is OK. |
2524 | */ |
2525 | static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, |
2526 | struct mvpp2_tx_queue *txq, int num) |
2527 | { |
2528 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
2529 | struct mvpp2 *priv = port->priv; |
2530 | u32 val; |
2531 | |
2532 | val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; |
2533 | mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, data: val); |
2534 | |
2535 | val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); |
2536 | |
2537 | return val & MVPP2_TXQ_RSVD_RSLT_MASK; |
2538 | } |
2539 | |
2540 | /* Check if there are enough reserved descriptors for transmission. |
2541 | * If not, request chunk of reserved descriptors and check again. |
2542 | */ |
2543 | static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, |
2544 | struct mvpp2_tx_queue *txq, |
2545 | struct mvpp2_txq_pcpu *txq_pcpu, |
2546 | int num) |
2547 | { |
2548 | int req, desc_count; |
2549 | unsigned int thread; |
2550 | |
2551 | if (txq_pcpu->reserved_num >= num) |
2552 | return 0; |
2553 | |
2554 | /* Not enough descriptors reserved! Update the reserved descriptor |
2555 | * count and check again. |
2556 | */ |
2557 | |
2558 | desc_count = 0; |
2559 | /* Compute total of used descriptors */ |
2560 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
2561 | struct mvpp2_txq_pcpu *txq_pcpu_aux; |
2562 | |
2563 | txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); |
2564 | desc_count += txq_pcpu_aux->count; |
2565 | desc_count += txq_pcpu_aux->reserved_num; |
2566 | } |
2567 | |
2568 | req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); |
2569 | desc_count += req; |
2570 | |
2571 | if (desc_count > |
2572 | (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) |
2573 | return -ENOMEM; |
2574 | |
2575 | txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, num: req); |
2576 | |
2577 | /* OK, the reserved descriptor count may have been updated: check again. */
2578 | if (txq_pcpu->reserved_num < num) |
2579 | return -ENOMEM; |
2580 | return 0; |
2581 | } |
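
/* Worked example (MVPP2_CPU_DESC_CHUNK is assumed here to be a small batch
 * size such as 64; the real value is in mvpp2.h): with num = 3 and
 * reserved_num = 0, req = max(64, 3 - 0) = 64, so descriptors are reserved
 * in chunks rather than per packet. The guard above refuses the request if
 * the queue could not hold a full chunk for every thread, keeping
 * per-thread reservations from starving each other.
 */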
2582 | |
2583 | /* Release the last allocated Tx descriptor. Useful to handle DMA |
2584 | * mapping failures in the Tx path. |
2585 | */ |
2586 | static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) |
2587 | { |
2588 | if (txq->next_desc_to_proc == 0) |
2589 | txq->next_desc_to_proc = txq->last_desc - 1; |
2590 | else |
2591 | txq->next_desc_to_proc--; |
2592 | } |
2593 | |
2594 | /* Set Tx descriptors fields relevant for CSUM calculation */ |
2595 | static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, |
2596 | int ip_hdr_len, int l4_proto) |
2597 | { |
2598 | u32 command; |
2599 | |
2600 | /* The fields L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2601 | * G_L4_chk and L4_type are required only for checksum calculation
2602 | */
2603 | command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); |
2604 | command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); |
2605 | command |= MVPP2_TXD_IP_CSUM_DISABLE; |
2606 | |
2607 | if (l3_proto == htons(ETH_P_IP)) { |
2608 | command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ |
2609 | command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ |
2610 | } else { |
2611 | command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ |
2612 | } |
2613 | |
2614 | if (l4_proto == IPPROTO_TCP) { |
2615 | command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ |
2616 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ |
2617 | } else if (l4_proto == IPPROTO_UDP) { |
2618 | command |= MVPP2_TXD_L4_UDP; /* enable UDP */ |
2619 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ |
2620 | } else { |
2621 | command |= MVPP2_TXD_L4_CSUM_NOT; |
2622 | } |
2623 | |
2624 | return command; |
2625 | } |
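
/* Usage sketch (hypothetical caller; the real Tx-checksum caller lives
 * elsewhere in this file): for an IPv4/TCP skb the command could be built
 * as
 *
 *	command = mvpp2_txq_desc_csum(skb_network_offset(skb),
 *				      htons(ETH_P_IP), ip_hdr(skb)->ihl,
 *				      IPPROTO_TCP);
 *
 * assuming the hardware expects the IP header length in the same 32-bit
 * word units as the IHL field.
 */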
2626 | |
2627 | /* Get number of sent descriptors and decrement counter. |
2628 | * The number of sent descriptors is returned. |
2629 | * Per-thread access |
2630 | * |
2631 | * Called only from mvpp2_txq_done(), called from mvpp2_tx() |
2632 | * (migration disabled) and from the TX completion tasklet (migration |
2633 | * disabled) so using smp_processor_id() is OK. |
2634 | */ |
2635 | static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, |
2636 | struct mvpp2_tx_queue *txq) |
2637 | { |
2638 | u32 val; |
2639 | |
2640 | /* Reading status reg resets transmitted descriptor counter */ |
2641 | val = mvpp2_thread_read_relaxed(priv: port->priv, |
2642 | thread: mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()), |
2643 | MVPP2_TXQ_SENT_REG(txq->id)); |
2644 | |
2645 | return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> |
2646 | MVPP2_TRANSMITTED_COUNT_OFFSET; |
2647 | } |
2648 | |
2649 | /* Called through on_each_cpu(), so runs on all CPUs, with migration |
2650 | * disabled, therefore using smp_processor_id() is OK. |
2651 | */ |
2652 | static void mvpp2_txq_sent_counter_clear(void *arg) |
2653 | { |
2654 | struct mvpp2_port *port = arg; |
2655 | int queue; |
2656 | |
2657 | /* If the thread isn't used, don't do anything */ |
2658 | if (smp_processor_id() >= port->priv->nthreads) |
2659 | return; |
2660 | |
2661 | for (queue = 0; queue < port->ntxqs; queue++) { |
2662 | int id = port->txqs[queue]->id; |
2663 | |
2664 | mvpp2_thread_read(priv: port->priv, |
2665 | thread: mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()), |
2666 | MVPP2_TXQ_SENT_REG(id)); |
2667 | } |
2668 | } |
2669 | |
2670 | /* Set max sizes for Tx queues */ |
2671 | static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) |
2672 | { |
2673 | u32 val, size, mtu; |
2674 | int txq, tx_port_num; |
2675 | |
2676 | mtu = port->pkt_size * 8; |
2677 | if (mtu > MVPP2_TXP_MTU_MAX) |
2678 | mtu = MVPP2_TXP_MTU_MAX; |
2679 | |
2680 | /* Workaround for wrong Token bucket update: set MTU value = 3x real MTU value */
2681 | mtu = 3 * mtu; |
2682 | |
2683 | /* Indirect access to registers */ |
2684 | tx_port_num = mvpp2_egress_port(port); |
2685 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, data: tx_port_num); |
2686 | |
2687 | /* Set MTU */ |
2688 | val = mvpp2_read(priv: port->priv, MVPP2_TXP_SCHED_MTU_REG); |
2689 | val &= ~MVPP2_TXP_MTU_MAX; |
2690 | val |= mtu; |
2691 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_MTU_REG, data: val); |
2692 | |
2693 | /* TXP token size and all TXQs token size must be larger than the MTU */
2694 | val = mvpp2_read(priv: port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); |
2695 | size = val & MVPP2_TXP_TOKEN_SIZE_MAX; |
2696 | if (size < mtu) { |
2697 | size = mtu; |
2698 | val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; |
2699 | val |= size; |
2700 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, data: val); |
2701 | } |
2702 | |
2703 | for (txq = 0; txq < port->ntxqs; txq++) { |
2704 | val = mvpp2_read(priv: port->priv, |
2705 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); |
2706 | size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; |
2707 | |
2708 | if (size < mtu) { |
2709 | size = mtu; |
2710 | val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; |
2711 | val |= size; |
2712 | mvpp2_write(priv: port->priv, |
2713 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), |
2714 | data: val); |
2715 | } |
2716 | } |
2717 | } |
2718 | |
2719 | /* Set the number of non-occupied descriptors threshold */ |
2720 | static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port, |
2721 | struct mvpp2_rx_queue *rxq) |
2722 | { |
2723 | u32 val; |
2724 | |
2725 | mvpp2_write(priv: port->priv, MVPP2_RXQ_NUM_REG, data: rxq->id); |
2726 | |
2727 | val = mvpp2_read(priv: port->priv, MVPP2_RXQ_THRESH_REG); |
2728 | val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK; |
2729 | val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET; |
2730 | mvpp2_write(priv: port->priv, MVPP2_RXQ_THRESH_REG, data: val); |
2731 | } |
2732 | |
2733 | /* Set the number of packets that will be received before Rx interrupt |
2734 | * will be generated by HW. |
2735 | */ |
2736 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, |
2737 | struct mvpp2_rx_queue *rxq) |
2738 | { |
2739 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, get_cpu()); |
2740 | |
2741 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
2742 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; |
2743 | |
2744 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_NUM_REG, data: rxq->id); |
2745 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_THRESH_REG, |
2746 | data: rxq->pkts_coal); |
2747 | |
2748 | put_cpu(); |
2749 | } |
2750 | |
2751 | /* For some reason the LSP does this on each CPU. Why? */
2752 | static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, |
2753 | struct mvpp2_tx_queue *txq) |
2754 | { |
2755 | unsigned int thread; |
2756 | u32 val; |
2757 | |
2758 | if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) |
2759 | txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; |
2760 | |
2761 | val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); |
2762 | /* PKT-coalescing registers are per-queue + per-thread */ |
2763 | for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { |
2764 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_NUM_REG, data: txq->id); |
2765 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_THRESH_REG, data: val); |
2766 | } |
2767 | } |
2768 | |
2769 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
2770 | { |
2771 | u64 tmp = (u64)clk_hz * usec; |
2772 | |
2773 | do_div(tmp, USEC_PER_SEC); |
2774 | |
2775 | return tmp > U32_MAX ? U32_MAX : tmp; |
2776 | } |
2777 | |
2778 | static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) |
2779 | { |
2780 | u64 tmp = (u64)cycles * USEC_PER_SEC; |
2781 | |
2782 | do_div(tmp, clk_hz); |
2783 | |
2784 | return tmp > U32_MAX ? U32_MAX : tmp; |
2785 | } |
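
/* Round-trip example for the two helpers above: with clk_hz = 250000000
 * (a 250 MHz tclk, used here purely for illustration),
 *
 *	mvpp2_usec_to_cycles(100, 250000000)   = 250000000 * 100 / 10^6 = 25000
 *	mvpp2_cycles_to_usec(25000, 250000000) = 25000 * 10^6 / 250000000 = 100
 *
 * Both clamp to U32_MAX on overflow.
 */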
2786 | |
2787 | /* Set the time delay in usec before Rx interrupt */ |
2788 | static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, |
2789 | struct mvpp2_rx_queue *rxq) |
2790 | { |
2791 | unsigned long freq = port->priv->tclk; |
2792 | u32 val = mvpp2_usec_to_cycles(usec: rxq->time_coal, clk_hz: freq); |
2793 | |
2794 | if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { |
2795 | rxq->time_coal = |
2796 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, clk_hz: freq); |
2797 | |
2798 | /* re-evaluate to get actual register value */ |
2799 | val = mvpp2_usec_to_cycles(usec: rxq->time_coal, clk_hz: freq); |
2800 | } |
2801 | |
2802 | mvpp2_write(priv: port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), data: val); |
2803 | } |
2804 | |
2805 | static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) |
2806 | { |
2807 | unsigned long freq = port->priv->tclk; |
2808 | u32 val = mvpp2_usec_to_cycles(usec: port->tx_time_coal, clk_hz: freq); |
2809 | |
2810 | if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { |
2811 | port->tx_time_coal = |
2812 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, clk_hz: freq); |
2813 | |
2814 | /* re-evaluate to get actual register value */ |
2815 | val = mvpp2_usec_to_cycles(usec: port->tx_time_coal, clk_hz: freq); |
2816 | } |
2817 | |
2818 | mvpp2_write(priv: port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), data: val); |
2819 | } |
2820 | |
2821 | /* Free Tx queue skbuffs */ |
2822 | static void mvpp2_txq_bufs_free(struct mvpp2_port *port, |
2823 | struct mvpp2_tx_queue *txq, |
2824 | struct mvpp2_txq_pcpu *txq_pcpu, int num) |
2825 | { |
2826 | struct xdp_frame_bulk bq; |
2827 | int i; |
2828 | |
2829 | xdp_frame_bulk_init(bq: &bq); |
2830 | |
2831 | rcu_read_lock(); /* need for xdp_return_frame_bulk */ |
2832 | |
2833 | for (i = 0; i < num; i++) { |
2834 | struct mvpp2_txq_pcpu_buf *tx_buf = |
2835 | txq_pcpu->buffs + txq_pcpu->txq_get_index; |
2836 | |
2837 | if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && |
2838 | tx_buf->type != MVPP2_TYPE_XDP_TX) |
2839 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, |
2840 | tx_buf->size, DMA_TO_DEVICE); |
2841 | if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) |
2842 | dev_kfree_skb_any(skb: tx_buf->skb); |
2843 | else if (tx_buf->type == MVPP2_TYPE_XDP_TX || |
2844 | tx_buf->type == MVPP2_TYPE_XDP_NDO) |
2845 | xdp_return_frame_bulk(xdpf: tx_buf->xdpf, bq: &bq); |
2846 | |
2847 | mvpp2_txq_inc_get(txq_pcpu); |
2848 | } |
2849 | xdp_flush_frame_bulk(bq: &bq); |
2850 | |
2851 | rcu_read_unlock(); |
2852 | } |
2853 | |
2854 | static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, |
2855 | u32 cause) |
2856 | { |
2857 | int queue = fls(x: cause) - 1; |
2858 | |
2859 | return port->rxqs[queue]; |
2860 | } |
2861 | |
2862 | static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, |
2863 | u32 cause) |
2864 | { |
2865 | int queue = fls(x: cause) - 1; |
2866 | |
2867 | return port->txqs[queue]; |
2868 | } |
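
/* Both lookups above map a cause bitmask to its highest-numbered queue:
 * fls(cause) - 1 is the index of the most significant set bit, e.g.
 * cause = 0b0110 gives fls() = 3, so queue 2 is returned. mvpp2_tx_done()
 * below clears that bit ("cause &= ~(1 << txq->log_id)") and loops until
 * all signalled queues have been handled.
 */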
2869 | |
2870 | /* Handle end of transmission */ |
2871 | static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
2872 | struct mvpp2_txq_pcpu *txq_pcpu) |
2873 | { |
2874 | struct netdev_queue *nq = netdev_get_tx_queue(dev: port->dev, index: txq->log_id); |
2875 | int tx_done; |
2876 | |
2877 | if (txq_pcpu->thread != mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id())) |
2878 | netdev_err(dev: port->dev, format: "wrong cpu at the end of Tx processing\n");
2879 | |
2880 | tx_done = mvpp2_txq_sent_desc_proc(port, txq); |
2881 | if (!tx_done) |
2882 | return; |
2883 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, num: tx_done); |
2884 | |
2885 | txq_pcpu->count -= tx_done; |
2886 | |
2887 | if (netif_tx_queue_stopped(dev_queue: nq)) |
2888 | if (txq_pcpu->count <= txq_pcpu->wake_threshold) |
2889 | netif_tx_wake_queue(dev_queue: nq); |
2890 | } |
2891 | |
2892 | static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, |
2893 | unsigned int thread) |
2894 | { |
2895 | struct mvpp2_tx_queue *txq; |
2896 | struct mvpp2_txq_pcpu *txq_pcpu; |
2897 | unsigned int tx_todo = 0; |
2898 | |
2899 | while (cause) { |
2900 | txq = mvpp2_get_tx_queue(port, cause); |
2901 | if (!txq) |
2902 | break; |
2903 | |
2904 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
2905 | |
2906 | if (txq_pcpu->count) { |
2907 | mvpp2_txq_done(port, txq, txq_pcpu); |
2908 | tx_todo += txq_pcpu->count; |
2909 | } |
2910 | |
2911 | cause &= ~(1 << txq->log_id); |
2912 | } |
2913 | return tx_todo; |
2914 | } |
2915 | |
2916 | /* Rx/Tx queue initialization/cleanup methods */ |
2917 | |
2918 | /* Allocate and initialize descriptors for aggr TXQ */ |
2919 | static int mvpp2_aggr_txq_init(struct platform_device *pdev, |
2920 | struct mvpp2_tx_queue *aggr_txq, |
2921 | unsigned int thread, struct mvpp2 *priv) |
2922 | { |
2923 | u32 txq_dma; |
2924 | |
2925 | /* Allocate memory for TX descriptors */ |
2926 | aggr_txq->descs = dma_alloc_coherent(dev: &pdev->dev, |
2927 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, |
2928 | dma_handle: &aggr_txq->descs_dma, GFP_KERNEL); |
2929 | if (!aggr_txq->descs) |
2930 | return -ENOMEM; |
2931 | |
2932 | aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; |
2933 | |
2934 | /* Workaround: the aggregated TXQ is not reset, so resync with the HW index */
2935 | aggr_txq->next_desc_to_proc = mvpp2_read(priv, |
2936 | MVPP2_AGGR_TXQ_INDEX_REG(thread)); |
2937 | |
2938 | /* Set Tx descriptors queue starting address - indirect
2939 | * access
2940 | */
2941 | if (priv->hw_version == MVPP21) |
2942 | txq_dma = aggr_txq->descs_dma; |
2943 | else |
2944 | txq_dma = aggr_txq->descs_dma >> |
2945 | MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; |
2946 | |
2947 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), data: txq_dma); |
2948 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), |
2949 | MVPP2_AGGR_TXQ_SIZE); |
2950 | |
2951 | return 0; |
2952 | } |
2953 | |
2954 | /* Create a specified Rx queue */ |
2955 | static int mvpp2_rxq_init(struct mvpp2_port *port, |
2956 | struct mvpp2_rx_queue *rxq) |
2957 | { |
2958 | struct mvpp2 *priv = port->priv; |
2959 | unsigned int thread; |
2960 | u32 rxq_dma; |
2961 | int err; |
2962 | |
2963 | rxq->size = port->rx_ring_size; |
2964 | |
2965 | /* Allocate memory for RX descriptors */ |
2966 | rxq->descs = dma_alloc_coherent(dev: port->dev->dev.parent, |
2967 | size: rxq->size * MVPP2_DESC_ALIGNED_SIZE, |
2968 | dma_handle: &rxq->descs_dma, GFP_KERNEL); |
2969 | if (!rxq->descs) |
2970 | return -ENOMEM; |
2971 | |
2972 | rxq->last_desc = rxq->size - 1; |
2973 | |
2974 | /* Zero occupied and non-occupied counters - direct access */ |
2975 | mvpp2_write(priv: port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), data: 0); |
2976 | |
2977 | /* Set Rx descriptors queue starting address - indirect access */ |
2978 | thread = mvpp2_cpu_to_thread(priv: port->priv, get_cpu()); |
2979 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_NUM_REG, data: rxq->id); |
2980 | if (port->priv->hw_version == MVPP21) |
2981 | rxq_dma = rxq->descs_dma; |
2982 | else |
2983 | rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; |
2984 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, data: rxq_dma); |
2985 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, data: rxq->size); |
2986 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_INDEX_REG, data: 0); |
2987 | put_cpu(); |
2988 | |
2989 | /* Set Offset */ |
2990 | mvpp2_rxq_offset_set(port, prxq: rxq->id, MVPP2_SKB_HEADROOM); |
2991 | |
2992 | /* Set coalescing pkts and time */ |
2993 | mvpp2_rx_pkts_coal_set(port, rxq); |
2994 | mvpp2_rx_time_coal_set(port, rxq); |
2995 | |
2996 | /* Set the number of non-occupied descriptors threshold */
2997 | mvpp2_set_rxq_free_tresh(port, rxq); |
2998 | |
2999 | /* Add number of descriptors ready for receiving packets */ |
3000 | mvpp2_rxq_status_update(port, rxq_id: rxq->id, used_count: 0, free_count: rxq->size); |
3001 | |
3002 | if (priv->percpu_pools) { |
3003 | err = xdp_rxq_info_reg(xdp_rxq: &rxq->xdp_rxq_short, dev: port->dev, queue_index: rxq->logic_rxq, napi_id: 0); |
3004 | if (err < 0) |
3005 | goto err_free_dma; |
3006 | |
3007 | err = xdp_rxq_info_reg(xdp_rxq: &rxq->xdp_rxq_long, dev: port->dev, queue_index: rxq->logic_rxq, napi_id: 0); |
3008 | if (err < 0) |
3009 | goto err_unregister_rxq_short; |
3010 | |
3011 | /* Every RXQ has a pool for short and another for long packets */ |
3012 | err = xdp_rxq_info_reg_mem_model(xdp_rxq: &rxq->xdp_rxq_short, |
3013 | type: MEM_TYPE_PAGE_POOL, |
3014 | allocator: priv->page_pool[rxq->logic_rxq]); |
3015 | if (err < 0) |
3016 | goto err_unregister_rxq_long; |
3017 | |
3018 | err = xdp_rxq_info_reg_mem_model(xdp_rxq: &rxq->xdp_rxq_long, |
3019 | type: MEM_TYPE_PAGE_POOL, |
3020 | allocator: priv->page_pool[rxq->logic_rxq + |
3021 | port->nrxqs]); |
3022 | if (err < 0) |
3023 | goto err_unregister_mem_rxq_short; |
3024 | } |
3025 | |
3026 | return 0; |
3027 | |
3028 | err_unregister_mem_rxq_short: |
3029 | xdp_rxq_info_unreg_mem_model(xdp_rxq: &rxq->xdp_rxq_short); |
3030 | err_unregister_rxq_long: |
3031 | xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq_long); |
3032 | err_unregister_rxq_short: |
3033 | xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq_short); |
3034 | err_free_dma: |
3035 | dma_free_coherent(dev: port->dev->dev.parent, |
3036 | size: rxq->size * MVPP2_DESC_ALIGNED_SIZE, |
3037 | cpu_addr: rxq->descs, dma_handle: rxq->descs_dma); |
3038 | return err; |
3039 | } |
3040 | |
3041 | /* Push packets received by the RXQ back to the BM pool */
3042 | static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, |
3043 | struct mvpp2_rx_queue *rxq) |
3044 | { |
3045 | int rx_received, i; |
3046 | |
3047 | rx_received = mvpp2_rxq_received(port, rxq_id: rxq->id); |
3048 | if (!rx_received) |
3049 | return; |
3050 | |
3051 | for (i = 0; i < rx_received; i++) { |
3052 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
3053 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
3054 | int pool; |
3055 | |
3056 | pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> |
3057 | MVPP2_RXD_BM_POOL_ID_OFFS; |
3058 | |
3059 | mvpp2_bm_pool_put(port, pool, |
3060 | buf_dma_addr: mvpp2_rxdesc_dma_addr_get(port, rx_desc), |
3061 | buf_phys_addr: mvpp2_rxdesc_cookie_get(port, rx_desc)); |
3062 | } |
3063 | mvpp2_rxq_status_update(port, rxq_id: rxq->id, used_count: rx_received, free_count: rx_received); |
3064 | } |
3065 | |
3066 | /* Cleanup Rx queue */ |
3067 | static void mvpp2_rxq_deinit(struct mvpp2_port *port, |
3068 | struct mvpp2_rx_queue *rxq) |
3069 | { |
3070 | unsigned int thread; |
3071 | |
3072 | if (xdp_rxq_info_is_reg(xdp_rxq: &rxq->xdp_rxq_short)) |
3073 | xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq_short); |
3074 | |
3075 | if (xdp_rxq_info_is_reg(xdp_rxq: &rxq->xdp_rxq_long)) |
3076 | xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq_long); |
3077 | |
3078 | mvpp2_rxq_drop_pkts(port, rxq); |
3079 | |
3080 | if (rxq->descs) |
3081 | dma_free_coherent(dev: port->dev->dev.parent, |
3082 | size: rxq->size * MVPP2_DESC_ALIGNED_SIZE, |
3083 | cpu_addr: rxq->descs, |
3084 | dma_handle: rxq->descs_dma); |
3085 | |
3086 | rxq->descs = NULL; |
3087 | rxq->last_desc = 0; |
3088 | rxq->next_desc_to_proc = 0; |
3089 | rxq->descs_dma = 0; |
3090 | |
3091 | /* Clear the Rx descriptors queue starting address and size;
3092 | * zero the free descriptor count
3093 | */
3094 | mvpp2_write(priv: port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), data: 0); |
3095 | thread = mvpp2_cpu_to_thread(priv: port->priv, get_cpu()); |
3096 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_NUM_REG, data: rxq->id); |
3097 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, data: 0); |
3098 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, data: 0); |
3099 | put_cpu(); |
3100 | } |
3101 | |
3102 | /* Create and initialize a Tx queue */ |
3103 | static int mvpp2_txq_init(struct mvpp2_port *port, |
3104 | struct mvpp2_tx_queue *txq) |
3105 | { |
3106 | u32 val; |
3107 | unsigned int thread; |
3108 | int desc, desc_per_txq, tx_port_num; |
3109 | struct mvpp2_txq_pcpu *txq_pcpu; |
3110 | |
3111 | txq->size = port->tx_ring_size; |
3112 | |
3113 | /* Allocate memory for Tx descriptors */ |
3114 | txq->descs = dma_alloc_coherent(dev: port->dev->dev.parent, |
3115 | size: txq->size * MVPP2_DESC_ALIGNED_SIZE, |
3116 | dma_handle: &txq->descs_dma, GFP_KERNEL); |
3117 | if (!txq->descs) |
3118 | return -ENOMEM; |
3119 | |
3120 | txq->last_desc = txq->size - 1; |
3121 | |
3122 | /* Set Tx descriptors queue starting address - indirect access */ |
3123 | thread = mvpp2_cpu_to_thread(priv: port->priv, get_cpu()); |
3124 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_NUM_REG, data: txq->id); |
3125 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, |
3126 | data: txq->descs_dma); |
3127 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, |
3128 | data: txq->size & MVPP2_TXQ_DESC_SIZE_MASK); |
3129 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_INDEX_REG, data: 0); |
3130 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, |
3131 | data: txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); |
3132 | val = mvpp2_thread_read(priv: port->priv, thread, MVPP2_TXQ_PENDING_REG); |
3133 | val &= ~MVPP2_TXQ_PENDING_MASK; |
3134 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_PENDING_REG, data: val); |
3135 | |
/* Calculate the base address in the prefetch buffer. We reserve 16
 * descriptors for each existing TXQ.
 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
 * GbE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
 */
3141 | desc_per_txq = 16; |
3142 | desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + |
3143 | (txq->log_id * desc_per_txq); |
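/* Worked example, assuming MVPP2_MAX_TXQ == 8: port 1, logical queue 2
 * starts at desc = 1 * 8 * 16 + 2 * 16 = 160 in the prefetch buffer.
 */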
3144 | |
3145 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, |
3146 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | |
3147 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); |
3148 | put_cpu(); |
3149 | |
3150 | /* WRR / EJP configuration - indirect access */ |
3151 | tx_port_num = mvpp2_egress_port(port); |
3152 | mvpp2_write(priv: port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, data: tx_port_num); |
3153 | |
3154 | val = mvpp2_read(priv: port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); |
3155 | val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; |
3156 | val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); |
3157 | val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; |
3158 | mvpp2_write(priv: port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), data: val); |
3159 | |
3160 | val = MVPP2_TXQ_TOKEN_SIZE_MAX; |
3161 | mvpp2_write(priv: port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), |
3162 | data: val); |
3163 | |
3164 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
3165 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3166 | txq_pcpu->size = txq->size; |
3167 | txq_pcpu->buffs = kmalloc_array(n: txq_pcpu->size, |
3168 | size: sizeof(*txq_pcpu->buffs), |
3169 | GFP_KERNEL); |
3170 | if (!txq_pcpu->buffs) |
3171 | return -ENOMEM; |
3172 | |
3173 | txq_pcpu->count = 0; |
3174 | txq_pcpu->reserved_num = 0; |
3175 | txq_pcpu->txq_put_index = 0; |
3176 | txq_pcpu->txq_get_index = 0; |
3177 | txq_pcpu->tso_headers = NULL; |
3178 | |
3179 | txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; |
3180 | txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; |
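/* The gap between the two thresholds provides hysteresis: the queue is
 * stopped once fewer than MVPP2_MAX_SKB_DESCS descriptors remain, and
 * is woken only after the in-flight count drops below half the stop
 * threshold, avoiding rapid stop/wake flapping.
 */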
3181 | |
3182 | txq_pcpu->tso_headers = |
3183 | dma_alloc_coherent(dev: port->dev->dev.parent, |
3184 | size: txq_pcpu->size * TSO_HEADER_SIZE, |
3185 | dma_handle: &txq_pcpu->tso_headers_dma, |
3186 | GFP_KERNEL); |
3187 | if (!txq_pcpu->tso_headers) |
3188 | return -ENOMEM; |
3189 | } |
3190 | |
3191 | return 0; |
3192 | } |
3193 | |
3194 | /* Free allocated TXQ resources */ |
3195 | static void mvpp2_txq_deinit(struct mvpp2_port *port, |
3196 | struct mvpp2_tx_queue *txq) |
3197 | { |
3198 | struct mvpp2_txq_pcpu *txq_pcpu; |
3199 | unsigned int thread; |
3200 | |
3201 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
3202 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3203 | kfree(objp: txq_pcpu->buffs); |
3204 | |
3205 | if (txq_pcpu->tso_headers) |
3206 | dma_free_coherent(dev: port->dev->dev.parent, |
3207 | size: txq_pcpu->size * TSO_HEADER_SIZE, |
3208 | cpu_addr: txq_pcpu->tso_headers, |
3209 | dma_handle: txq_pcpu->tso_headers_dma); |
3210 | |
3211 | txq_pcpu->tso_headers = NULL; |
3212 | } |
3213 | |
3214 | if (txq->descs) |
3215 | dma_free_coherent(dev: port->dev->dev.parent, |
3216 | size: txq->size * MVPP2_DESC_ALIGNED_SIZE, |
3217 | cpu_addr: txq->descs, dma_handle: txq->descs_dma); |
3218 | |
3219 | txq->descs = NULL; |
3220 | txq->last_desc = 0; |
3221 | txq->next_desc_to_proc = 0; |
3222 | txq->descs_dma = 0; |
3223 | |
3224 | /* Set minimum bandwidth for disabled TXQs */ |
3225 | mvpp2_write(priv: port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), data: 0); |
3226 | |
3227 | /* Set Tx descriptors queue starting address and size */ |
3228 | thread = mvpp2_cpu_to_thread(priv: port->priv, get_cpu()); |
3229 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_NUM_REG, data: txq->id); |
3230 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, data: 0); |
3231 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, data: 0); |
3232 | put_cpu(); |
3233 | } |
3234 | |
3235 | /* Cleanup Tx ports */ |
3236 | static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) |
3237 | { |
3238 | struct mvpp2_txq_pcpu *txq_pcpu; |
3239 | int delay, pending; |
3240 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, get_cpu()); |
3241 | u32 val; |
3242 | |
3243 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_NUM_REG, data: txq->id); |
3244 | val = mvpp2_thread_read(priv: port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); |
3245 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
3246 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, data: val); |
3247 | |
3248 | /* The napi queue has been stopped so wait for all packets |
3249 | * to be transmitted. |
3250 | */ |
3251 | delay = 0; |
3252 | do { |
3253 | if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { |
3254 | netdev_warn(dev: port->dev, |
3255 | format: "port %d: cleaning queue %d timed out\n" , |
3256 | port->id, txq->log_id); |
3257 | break; |
3258 | } |
3259 | mdelay(1); |
3260 | delay++; |
3261 | |
3262 | pending = mvpp2_thread_read(priv: port->priv, thread, |
3263 | MVPP2_TXQ_PENDING_REG); |
3264 | pending &= MVPP2_TXQ_PENDING_MASK; |
3265 | } while (pending); |
3266 | |
3267 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; |
3268 | mvpp2_thread_write(priv: port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, data: val); |
3269 | put_cpu(); |
3270 | |
3271 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
3272 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3273 | |
3274 | /* Release all packets */ |
3275 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, num: txq_pcpu->count); |
3276 | |
3277 | /* Reset queue */ |
3278 | txq_pcpu->count = 0; |
3279 | txq_pcpu->txq_put_index = 0; |
3280 | txq_pcpu->txq_get_index = 0; |
3281 | } |
3282 | } |
3283 | |
3284 | /* Cleanup all Tx queues */ |
3285 | static void mvpp2_cleanup_txqs(struct mvpp2_port *port) |
3286 | { |
3287 | struct mvpp2_tx_queue *txq; |
3288 | int queue; |
3289 | u32 val; |
3290 | |
3291 | val = mvpp2_read(priv: port->priv, MVPP2_TX_PORT_FLUSH_REG); |
3292 | |
3293 | /* Reset Tx ports and delete Tx queues */ |
3294 | val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); |
3295 | mvpp2_write(priv: port->priv, MVPP2_TX_PORT_FLUSH_REG, data: val); |
3296 | |
3297 | for (queue = 0; queue < port->ntxqs; queue++) { |
3298 | txq = port->txqs[queue]; |
3299 | mvpp2_txq_clean(port, txq); |
3300 | mvpp2_txq_deinit(port, txq); |
3301 | } |
3302 | |
3303 | on_each_cpu(func: mvpp2_txq_sent_counter_clear, info: port, wait: 1); |
3304 | |
3305 | val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); |
3306 | mvpp2_write(priv: port->priv, MVPP2_TX_PORT_FLUSH_REG, data: val); |
3307 | } |
3308 | |
3309 | /* Cleanup all Rx queues */ |
3310 | static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) |
3311 | { |
3312 | int queue; |
3313 | |
3314 | for (queue = 0; queue < port->nrxqs; queue++) |
3315 | mvpp2_rxq_deinit(port, rxq: port->rxqs[queue]); |
3316 | |
3317 | if (port->tx_fc) |
3318 | mvpp2_rxq_disable_fc(port); |
3319 | } |
3320 | |
3321 | /* Init all Rx queues for port */ |
3322 | static int mvpp2_setup_rxqs(struct mvpp2_port *port) |
3323 | { |
3324 | int queue, err; |
3325 | |
3326 | for (queue = 0; queue < port->nrxqs; queue++) { |
3327 | err = mvpp2_rxq_init(port, rxq: port->rxqs[queue]); |
3328 | if (err) |
3329 | goto err_cleanup; |
3330 | } |
3331 | |
3332 | if (port->tx_fc) |
3333 | mvpp2_rxq_enable_fc(port); |
3334 | |
3335 | return 0; |
3336 | |
3337 | err_cleanup: |
3338 | mvpp2_cleanup_rxqs(port); |
3339 | return err; |
3340 | } |
3341 | |
3342 | /* Init all tx queues for port */ |
3343 | static int mvpp2_setup_txqs(struct mvpp2_port *port) |
3344 | { |
3345 | struct mvpp2_tx_queue *txq; |
3346 | int queue, err; |
3347 | |
3348 | for (queue = 0; queue < port->ntxqs; queue++) { |
3349 | txq = port->txqs[queue]; |
3350 | err = mvpp2_txq_init(port, txq); |
3351 | if (err) |
3352 | goto err_cleanup; |
3353 | |
3354 | /* Assign this queue to a CPU */ |
3355 | if (queue < num_possible_cpus()) |
3356 | netif_set_xps_queue(dev: port->dev, cpumask_of(queue), index: queue); |
3357 | } |
3358 | |
3359 | if (port->has_tx_irqs) { |
3360 | mvpp2_tx_time_coal_set(port); |
3361 | for (queue = 0; queue < port->ntxqs; queue++) { |
3362 | txq = port->txqs[queue]; |
3363 | mvpp2_tx_pkts_coal_set(port, txq); |
3364 | } |
3365 | } |
3366 | |
3367 | on_each_cpu(func: mvpp2_txq_sent_counter_clear, info: port, wait: 1); |
3368 | return 0; |
3369 | |
3370 | err_cleanup: |
3371 | mvpp2_cleanup_txqs(port); |
3372 | return err; |
3373 | } |
3374 | |
3375 | /* The callback for per-port interrupt */ |
3376 | static irqreturn_t mvpp2_isr(int irq, void *dev_id) |
3377 | { |
3378 | struct mvpp2_queue_vector *qv = dev_id; |
3379 | |
3380 | mvpp2_qvec_interrupt_disable(qvec: qv); |
3381 | |
3382 | napi_schedule(n: &qv->napi); |
3383 | |
3384 | return IRQ_HANDLED; |
3385 | } |
3386 | |
3387 | static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) |
3388 | { |
3389 | struct skb_shared_hwtstamps shhwtstamps; |
3390 | struct mvpp2_hwtstamp_queue *queue; |
3391 | struct sk_buff *skb; |
3392 | void __iomem *ptp_q; |
3393 | unsigned int id; |
3394 | u32 r0, r1, r2; |
3395 | |
3396 | ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); |
3397 | if (nq) |
3398 | ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; |
3399 | |
3400 | queue = &port->tx_hwtstamp_queue[nq]; |
3401 | |
3402 | while (1) { |
3403 | r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; |
3404 | if (!r0) |
3405 | break; |
3406 | |
3407 | r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; |
3408 | r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff; |
3409 | |
3410 | id = (r0 >> 1) & 31; |
3411 | |
3412 | skb = queue->skb[id]; |
3413 | queue->skb[id] = NULL; |
3414 | if (skb) { |
3415 | u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; |
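/* The hardware spreads the timestamp across three 16-bit registers:
 * bits 2:0 come from r0[15:13], bits 18:3 from r1, and the upper bits
 * from the low bits of r2. mvpp22_tai_tstamp() expands this partial
 * value to a full time.
 */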
3416 | |
3417 | mvpp22_tai_tstamp(tai: port->priv->tai, tstamp: ts, hwtstamp: &shhwtstamps); |
3418 | skb_tstamp_tx(orig_skb: skb, hwtstamps: &shhwtstamps); |
3419 | dev_kfree_skb_any(skb); |
3420 | } |
3421 | } |
3422 | } |
3423 | |
3424 | static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) |
3425 | { |
3426 | void __iomem *ptp; |
3427 | u32 val; |
3428 | |
3429 | ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); |
3430 | val = readl(addr: ptp + MVPP22_PTP_INT_CAUSE); |
3431 | if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) |
3432 | mvpp2_isr_handle_ptp_queue(port, nq: 0); |
3433 | if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) |
3434 | mvpp2_isr_handle_ptp_queue(port, nq: 1); |
3435 | } |
3436 | |
3437 | static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) |
3438 | { |
3439 | struct net_device *dev = port->dev; |
3440 | |
3441 | if (port->phylink) { |
3442 | phylink_mac_change(port->phylink, up: link); |
3443 | return; |
3444 | } |
3445 | |
3446 | if (!netif_running(dev)) |
3447 | return; |
3448 | |
3449 | if (link) { |
3450 | mvpp2_interrupts_enable(port); |
3451 | |
3452 | mvpp2_egress_enable(port); |
3453 | mvpp2_ingress_enable(port); |
3454 | netif_carrier_on(dev); |
3455 | netif_tx_wake_all_queues(dev); |
3456 | } else { |
3457 | netif_tx_stop_all_queues(dev); |
3458 | netif_carrier_off(dev); |
3459 | mvpp2_ingress_disable(port); |
3460 | mvpp2_egress_disable(port); |
3461 | |
3462 | mvpp2_interrupts_disable(port); |
3463 | } |
3464 | } |
3465 | |
3466 | static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) |
3467 | { |
3468 | bool link; |
3469 | u32 val; |
3470 | |
3471 | val = readl(addr: port->base + MVPP22_XLG_INT_STAT); |
3472 | if (val & MVPP22_XLG_INT_STAT_LINK) { |
3473 | val = readl(addr: port->base + MVPP22_XLG_STATUS); |
3474 | link = (val & MVPP22_XLG_STATUS_LINK_UP); |
3475 | mvpp2_isr_handle_link(port, link); |
3476 | } |
3477 | } |
3478 | |
3479 | static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) |
3480 | { |
3481 | bool link; |
3482 | u32 val; |
3483 | |
3484 | if (phy_interface_mode_is_rgmii(mode: port->phy_interface) || |
3485 | phy_interface_mode_is_8023z(mode: port->phy_interface) || |
3486 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
3487 | val = readl(addr: port->base + MVPP22_GMAC_INT_STAT); |
3488 | if (val & MVPP22_GMAC_INT_STAT_LINK) { |
3489 | val = readl(addr: port->base + MVPP2_GMAC_STATUS0); |
3490 | link = (val & MVPP2_GMAC_STATUS0_LINK_UP); |
3491 | mvpp2_isr_handle_link(port, link); |
3492 | } |
3493 | } |
3494 | } |
3495 | |
3496 | /* Per-port interrupt for link status changes */ |
3497 | static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) |
3498 | { |
3499 | struct mvpp2_port *port = (struct mvpp2_port *)dev_id; |
3500 | u32 val; |
3501 | |
3502 | mvpp22_gop_mask_irq(port); |
3503 | |
3504 | if (mvpp2_port_supports_xlg(port) && |
3505 | mvpp2_is_xlg(interface: port->phy_interface)) { |
3506 | /* Check the external status register */ |
3507 | val = readl(addr: port->base + MVPP22_XLG_EXT_INT_STAT); |
3508 | if (val & MVPP22_XLG_EXT_INT_STAT_XLG) |
3509 | mvpp2_isr_handle_xlg(port); |
3510 | if (val & MVPP22_XLG_EXT_INT_STAT_PTP) |
3511 | mvpp2_isr_handle_ptp(port); |
3512 | } else { |
3513 | /* If it's not the XLG, we must be using the GMAC. |
3514 | * Check the summary status. |
3515 | */ |
3516 | val = readl(addr: port->base + MVPP22_GMAC_INT_SUM_STAT); |
3517 | if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) |
3518 | mvpp2_isr_handle_gmac_internal(port); |
3519 | if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) |
3520 | mvpp2_isr_handle_ptp(port); |
3521 | } |
3522 | |
3523 | mvpp22_gop_unmask_irq(port); |
3524 | return IRQ_HANDLED; |
3525 | } |
3526 | |
3527 | static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) |
3528 | { |
3529 | struct net_device *dev; |
3530 | struct mvpp2_port *port; |
3531 | struct mvpp2_port_pcpu *port_pcpu; |
3532 | unsigned int tx_todo, cause; |
3533 | |
3534 | port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); |
3535 | dev = port_pcpu->dev; |
3536 | |
3537 | if (!netif_running(dev)) |
3538 | return HRTIMER_NORESTART; |
3539 | |
3540 | port_pcpu->timer_scheduled = false; |
3541 | port = netdev_priv(dev); |
3542 | |
3543 | /* Process all the Tx queues */ |
3544 | cause = (1 << port->ntxqs) - 1; |
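/* One bit per Tx queue, e.g. ntxqs == 4 yields a cause mask of 0xf */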
3545 | tx_todo = mvpp2_tx_done(port, cause, |
3546 | thread: mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id())); |
3547 | |
3548 | /* Set the timer in case not all the packets were processed */ |
3549 | if (tx_todo && !port_pcpu->timer_scheduled) { |
3550 | port_pcpu->timer_scheduled = true; |
3551 | hrtimer_forward_now(timer: &port_pcpu->tx_done_timer, |
3552 | MVPP2_TXDONE_HRTIMER_PERIOD_NS); |
3553 | |
3554 | return HRTIMER_RESTART; |
3555 | } |
3556 | return HRTIMER_NORESTART; |
3557 | } |
3558 | |
3559 | /* Main RX/TX processing routines */ |
3560 | |
3561 | /* Display more error info */ |
3562 | static void mvpp2_rx_error(struct mvpp2_port *port, |
3563 | struct mvpp2_rx_desc *rx_desc) |
3564 | { |
3565 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
3566 | size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); |
3567 | char *err_str = NULL; |
3568 | |
3569 | switch (status & MVPP2_RXD_ERR_CODE_MASK) { |
case MVPP2_RXD_ERR_CRC:
err_str = "crc";
break;
case MVPP2_RXD_ERR_OVERRUN:
err_str = "overrun";
break;
case MVPP2_RXD_ERR_RESOURCE:
err_str = "resource";
break;
3579 | } |
3580 | if (err_str && net_ratelimit()) |
3581 | netdev_err(dev: port->dev, |
3582 | format: "bad rx status %08x (%s error), size=%zu\n" , |
3583 | status, err_str, sz); |
3584 | } |
3585 | |
3586 | /* Handle RX checksum offload */ |
3587 | static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) |
3588 | { |
3589 | if (((status & MVPP2_RXD_L3_IP4) && |
3590 | !(status & MVPP2_RXD_IP4_HEADER_ERR)) || |
3591 | (status & MVPP2_RXD_L3_IP6)) |
3592 | if (((status & MVPP2_RXD_L4_UDP) || |
3593 | (status & MVPP2_RXD_L4_TCP)) && |
3594 | (status & MVPP2_RXD_L4_CSUM_OK)) |
3595 | return CHECKSUM_UNNECESSARY; |
3596 | |
3597 | return CHECKSUM_NONE; |
3598 | } |
3599 | |
/* Allocate a new buffer and add it to the BM pool */
3601 | static int mvpp2_rx_refill(struct mvpp2_port *port, |
3602 | struct mvpp2_bm_pool *bm_pool, |
3603 | struct page_pool *page_pool, int pool) |
3604 | { |
3605 | dma_addr_t dma_addr; |
3606 | phys_addr_t phys_addr; |
3607 | void *buf; |
3608 | |
3609 | buf = mvpp2_buf_alloc(port, bm_pool, page_pool, |
3610 | buf_dma_addr: &dma_addr, buf_phys_addr: &phys_addr, GFP_ATOMIC); |
3611 | if (!buf) |
3612 | return -ENOMEM; |
3613 | |
3614 | mvpp2_bm_pool_put(port, pool, buf_dma_addr: dma_addr, buf_phys_addr: phys_addr); |
3615 | |
3616 | return 0; |
3617 | } |
3618 | |
3619 | /* Handle tx checksum */ |
3620 | static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) |
3621 | { |
3622 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3623 | int ip_hdr_len = 0; |
3624 | u8 l4_proto; |
3625 | __be16 l3_proto = vlan_get_protocol(skb); |
3626 | |
3627 | if (l3_proto == htons(ETH_P_IP)) { |
3628 | struct iphdr *ip4h = ip_hdr(skb); |
3629 | |
3630 | /* Calculate IPv4 checksum and L4 checksum */ |
3631 | ip_hdr_len = ip4h->ihl; |
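/* ihl is already in 32-bit words; the IPv6 branch below divides the
 * byte length by four so both branches pass the same unit on.
 */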
3632 | l4_proto = ip4h->protocol; |
3633 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
3634 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
3635 | |
3636 | /* Read l4_protocol from one of IPv6 extra headers */ |
3637 | if (skb_network_header_len(skb) > 0) |
3638 | ip_hdr_len = (skb_network_header_len(skb) >> 2); |
3639 | l4_proto = ip6h->nexthdr; |
3640 | } else { |
3641 | return MVPP2_TXD_L4_CSUM_NOT; |
3642 | } |
3643 | |
3644 | return mvpp2_txq_desc_csum(l3_offs: skb_network_offset(skb), |
3645 | l3_proto, ip_hdr_len, l4_proto); |
3646 | } |
3647 | |
3648 | return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; |
3649 | } |
3650 | |
3651 | static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) |
3652 | { |
3653 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
3654 | struct mvpp2_tx_queue *aggr_txq; |
3655 | struct mvpp2_txq_pcpu *txq_pcpu; |
3656 | struct mvpp2_tx_queue *txq; |
3657 | struct netdev_queue *nq; |
3658 | |
3659 | txq = port->txqs[txq_id]; |
3660 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3661 | nq = netdev_get_tx_queue(dev: port->dev, index: txq_id); |
3662 | aggr_txq = &port->priv->aggr_txqs[thread]; |
3663 | |
3664 | txq_pcpu->reserved_num -= nxmit; |
3665 | txq_pcpu->count += nxmit; |
3666 | aggr_txq->count += nxmit; |
3667 | |
3668 | /* Enable transmit */ |
3669 | wmb(); |
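/* The barrier ensures the descriptor writes are visible to the DMA
 * engine before the pending-descriptor counter is bumped below.
 */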
3670 | mvpp2_aggr_txq_pend_desc_add(port, pending: nxmit); |
3671 | |
3672 | if (txq_pcpu->count >= txq_pcpu->stop_threshold) |
3673 | netif_tx_stop_queue(dev_queue: nq); |
3674 | |
3675 | /* Finalize TX processing */ |
3676 | if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) |
3677 | mvpp2_txq_done(port, txq, txq_pcpu); |
3678 | } |
3679 | |
3680 | static int |
3681 | mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, |
3682 | struct xdp_frame *xdpf, bool dma_map) |
3683 | { |
3684 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
3685 | u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | |
3686 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; |
3687 | enum mvpp2_tx_buf_type buf_type; |
3688 | struct mvpp2_txq_pcpu *txq_pcpu; |
3689 | struct mvpp2_tx_queue *aggr_txq; |
3690 | struct mvpp2_tx_desc *tx_desc; |
3691 | struct mvpp2_tx_queue *txq; |
3692 | int ret = MVPP2_XDP_TX; |
3693 | dma_addr_t dma_addr; |
3694 | |
3695 | txq = port->txqs[txq_id]; |
3696 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3697 | aggr_txq = &port->priv->aggr_txqs[thread]; |
3698 | |
3699 | /* Check number of available descriptors */ |
3700 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, num: 1) || |
3701 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, num: 1)) { |
3702 | ret = MVPP2_XDP_DROPPED; |
3703 | goto out; |
3704 | } |
3705 | |
3706 | /* Get a descriptor for the first part of the packet */ |
3707 | tx_desc = mvpp2_txq_next_desc_get(txq: aggr_txq); |
3708 | mvpp2_txdesc_txq_set(port, tx_desc, txq: txq->id); |
3709 | mvpp2_txdesc_size_set(port, tx_desc, size: xdpf->len); |
3710 | |
3711 | if (dma_map) { |
3712 | /* XDP_REDIRECT or AF_XDP */ |
3713 | dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, |
3714 | xdpf->len, DMA_TO_DEVICE); |
3715 | |
3716 | if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { |
3717 | mvpp2_txq_desc_put(txq); |
3718 | ret = MVPP2_XDP_DROPPED; |
3719 | goto out; |
3720 | } |
3721 | |
3722 | buf_type = MVPP2_TYPE_XDP_NDO; |
3723 | } else { |
3724 | /* XDP_TX */ |
3725 | struct page *page = virt_to_page(xdpf->data); |
3726 | |
3727 | dma_addr = page_pool_get_dma_addr(page) + |
3728 | sizeof(*xdpf) + xdpf->headroom; |
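/* For XDP_TX the frame still lives in its page_pool page: the
 * xdp_frame struct sits at the start of the page, so the payload's
 * DMA address is the page's pool mapping plus the struct size plus
 * the frame headroom.
 */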
3729 | dma_sync_single_for_device(dev: port->dev->dev.parent, addr: dma_addr, |
3730 | size: xdpf->len, dir: DMA_BIDIRECTIONAL); |
3731 | |
3732 | buf_type = MVPP2_TYPE_XDP_TX; |
3733 | } |
3734 | |
3735 | mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); |
3736 | |
3737 | mvpp2_txdesc_cmd_set(port, tx_desc, command: tx_cmd); |
3738 | mvpp2_txq_inc_put(port, txq_pcpu, data: xdpf, tx_desc, buf_type); |
3739 | |
3740 | out: |
3741 | return ret; |
3742 | } |
3743 | |
3744 | static int |
3745 | mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) |
3746 | { |
3747 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
3748 | struct xdp_frame *xdpf; |
3749 | u16 txq_id; |
3750 | int ret; |
3751 | |
3752 | xdpf = xdp_convert_buff_to_frame(xdp); |
3753 | if (unlikely(!xdpf)) |
3754 | return MVPP2_XDP_DROPPED; |
3755 | |
/* The first half of the TX queues is used for XPS,
 * the second half for XDP_TX.
 */
3759 | txq_id = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()) + (port->ntxqs / 2); |
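/* E.g. with ntxqs == 8 and one thread per CPU, CPU 2 transmits on
 * queue 2 + 4 = 6.
 */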
3760 | |
3761 | ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, dma_map: false); |
3762 | if (ret == MVPP2_XDP_TX) { |
3763 | u64_stats_update_begin(syncp: &stats->syncp); |
3764 | stats->tx_bytes += xdpf->len; |
3765 | stats->tx_packets++; |
3766 | stats->xdp_tx++; |
3767 | u64_stats_update_end(syncp: &stats->syncp); |
3768 | |
3769 | mvpp2_xdp_finish_tx(port, txq_id, nxmit: 1, nxmit_byte: xdpf->len); |
3770 | } else { |
3771 | u64_stats_update_begin(syncp: &stats->syncp); |
3772 | stats->xdp_tx_err++; |
3773 | u64_stats_update_end(syncp: &stats->syncp); |
3774 | } |
3775 | |
3776 | return ret; |
3777 | } |
3778 | |
3779 | static int |
3780 | mvpp2_xdp_xmit(struct net_device *dev, int num_frame, |
3781 | struct xdp_frame **frames, u32 flags) |
3782 | { |
3783 | struct mvpp2_port *port = netdev_priv(dev); |
3784 | int i, nxmit_byte = 0, nxmit = 0; |
3785 | struct mvpp2_pcpu_stats *stats; |
3786 | u16 txq_id; |
3787 | u32 ret; |
3788 | |
3789 | if (unlikely(test_bit(0, &port->state))) |
3790 | return -ENETDOWN; |
3791 | |
3792 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
3793 | return -EINVAL; |
3794 | |
/* The first half of the TX queues is used for XPS,
 * the second half for XDP_TX.
 */
3798 | txq_id = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()) + (port->ntxqs / 2); |
3799 | |
3800 | for (i = 0; i < num_frame; i++) { |
3801 | ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf: frames[i], dma_map: true); |
3802 | if (ret != MVPP2_XDP_TX) |
3803 | break; |
3804 | |
3805 | nxmit_byte += frames[i]->len; |
3806 | nxmit++; |
3807 | } |
3808 | |
3809 | if (likely(nxmit > 0)) |
3810 | mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); |
3811 | |
3812 | stats = this_cpu_ptr(port->stats); |
3813 | u64_stats_update_begin(syncp: &stats->syncp); |
3814 | stats->tx_bytes += nxmit_byte; |
3815 | stats->tx_packets += nxmit; |
3816 | stats->xdp_xmit += nxmit; |
3817 | stats->xdp_xmit_err += num_frame - nxmit; |
3818 | u64_stats_update_end(syncp: &stats->syncp); |
3819 | |
3820 | return nxmit; |
3821 | } |
3822 | |
3823 | static int |
3824 | mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, |
3825 | struct xdp_buff *xdp, struct page_pool *pp, |
3826 | struct mvpp2_pcpu_stats *stats) |
3827 | { |
3828 | unsigned int len, sync, err; |
3829 | struct page *page; |
3830 | u32 ret, act; |
3831 | |
3832 | len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; |
3833 | act = bpf_prog_run_xdp(prog, xdp); |
3834 | |
/* Because of xdp_adjust_tail, the DMA sync for_device must cover the
 * maximum length the CPU may have touched.
 */
3836 | sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; |
3837 | sync = max(sync, len); |
3838 | |
3839 | switch (act) { |
3840 | case XDP_PASS: |
3841 | stats->xdp_pass++; |
3842 | ret = MVPP2_XDP_PASS; |
3843 | break; |
3844 | case XDP_REDIRECT: |
3845 | err = xdp_do_redirect(dev: port->dev, xdp, prog); |
3846 | if (unlikely(err)) { |
3847 | ret = MVPP2_XDP_DROPPED; |
3848 | page = virt_to_head_page(x: xdp->data); |
3849 | page_pool_put_page(pool: pp, page, dma_sync_size: sync, allow_direct: true); |
3850 | } else { |
3851 | ret = MVPP2_XDP_REDIR; |
3852 | stats->xdp_redirect++; |
3853 | } |
3854 | break; |
3855 | case XDP_TX: |
3856 | ret = mvpp2_xdp_xmit_back(port, xdp); |
3857 | if (ret != MVPP2_XDP_TX) { |
3858 | page = virt_to_head_page(x: xdp->data); |
3859 | page_pool_put_page(pool: pp, page, dma_sync_size: sync, allow_direct: true); |
3860 | } |
3861 | break; |
3862 | default: |
3863 | bpf_warn_invalid_xdp_action(dev: port->dev, prog, act); |
3864 | fallthrough; |
3865 | case XDP_ABORTED: |
3866 | trace_xdp_exception(dev: port->dev, xdp: prog, act); |
3867 | fallthrough; |
3868 | case XDP_DROP: |
3869 | page = virt_to_head_page(x: xdp->data); |
3870 | page_pool_put_page(pool: pp, page, dma_sync_size: sync, allow_direct: true); |
3871 | ret = MVPP2_XDP_DROPPED; |
3872 | stats->xdp_drop++; |
3873 | break; |
3874 | } |
3875 | |
3876 | return ret; |
3877 | } |
3878 | |
3879 | static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc, |
3880 | int pool, u32 rx_status) |
3881 | { |
3882 | phys_addr_t phys_addr, phys_addr_next; |
3883 | dma_addr_t dma_addr, dma_addr_next; |
3884 | struct mvpp2_buff_hdr *buff_hdr; |
3885 | |
dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3888 | |
3889 | do { |
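/* Walk the chain of buffer headers the hardware built for this frame:
 * each header stores the addresses of the next buffer in the chain,
 * and the info field flags the final link.
 */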
3890 | buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(address: phys_addr); |
3891 | |
3892 | phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr); |
3893 | dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr); |
3894 | |
3895 | if (port->priv->hw_version >= MVPP22) { |
3896 | phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32); |
3897 | dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32); |
3898 | } |
3899 | |
3900 | mvpp2_bm_pool_put(port, pool, buf_dma_addr: dma_addr, buf_phys_addr: phys_addr); |
3901 | |
3902 | phys_addr = phys_addr_next; |
3903 | dma_addr = dma_addr_next; |
3904 | |
3905 | } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info))); |
3906 | } |
3907 | |
3908 | /* Main rx processing */ |
3909 | static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, |
3910 | int rx_todo, struct mvpp2_rx_queue *rxq) |
3911 | { |
3912 | struct net_device *dev = port->dev; |
3913 | struct mvpp2_pcpu_stats ps = {}; |
3914 | enum dma_data_direction dma_dir; |
3915 | struct bpf_prog *xdp_prog; |
3916 | struct xdp_buff xdp; |
3917 | int rx_received; |
3918 | int rx_done = 0; |
3919 | u32 xdp_ret = 0; |
3920 | |
3921 | xdp_prog = READ_ONCE(port->xdp_prog); |
3922 | |
3923 | /* Get number of received packets and clamp the to-do */ |
3924 | rx_received = mvpp2_rxq_received(port, rxq_id: rxq->id); |
3925 | if (rx_todo > rx_received) |
3926 | rx_todo = rx_received; |
3927 | |
3928 | while (rx_done < rx_todo) { |
3929 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
3930 | struct mvpp2_bm_pool *bm_pool; |
3931 | struct page_pool *pp = NULL; |
3932 | struct sk_buff *skb; |
3933 | unsigned int frag_size; |
3934 | dma_addr_t dma_addr; |
3935 | phys_addr_t phys_addr; |
3936 | u32 rx_status, timestamp; |
3937 | int pool, rx_bytes, err, ret; |
3938 | struct page *page; |
3939 | void *data; |
3940 | |
3941 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); |
3942 | data = (void *)phys_to_virt(address: phys_addr); |
3943 | page = virt_to_page(data); |
3944 | prefetch(page); |
3945 | |
3946 | rx_done++; |
3947 | rx_status = mvpp2_rxdesc_status_get(port, rx_desc); |
3948 | rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); |
3949 | rx_bytes -= MVPP2_MH_SIZE; |
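/* The hardware prepends a Marvell header (MVPP2_MH_SIZE bytes) to every
 * received frame; it is not part of the packet, so strip it from the
 * byte count here and skip it via skb_reserve() below.
 */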
3950 | dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); |
3951 | |
3952 | pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> |
3953 | MVPP2_RXD_BM_POOL_ID_OFFS; |
3954 | bm_pool = &port->priv->bm_pools[pool]; |
3955 | |
3956 | if (port->priv->percpu_pools) { |
3957 | pp = port->priv->page_pool[pool]; |
3958 | dma_dir = page_pool_get_dma_dir(pool: pp); |
3959 | } else { |
3960 | dma_dir = DMA_FROM_DEVICE; |
3961 | } |
3962 | |
3963 | dma_sync_single_for_cpu(dev: dev->dev.parent, addr: dma_addr, |
3964 | size: rx_bytes + MVPP2_MH_SIZE, |
3965 | dir: dma_dir); |
3966 | |
3967 | /* Buffer header not supported */ |
3968 | if (rx_status & MVPP2_RXD_BUF_HDR) |
3969 | goto err_drop_frame; |
3970 | |
/* In case of an error, release the buffer back to the Buffer
 * Manager. The return process is handled by the hardware, and
 * the information about the buffer is carried in the RX
 * descriptor.
 */
3976 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) |
3977 | goto err_drop_frame; |
3978 | |
3979 | /* Prefetch header */ |
3980 | prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); |
3981 | |
3982 | if (bm_pool->frag_size > PAGE_SIZE) |
3983 | frag_size = 0; |
3984 | else |
3985 | frag_size = bm_pool->frag_size; |
3986 | |
3987 | if (xdp_prog) { |
3988 | struct xdp_rxq_info *xdp_rxq; |
3989 | |
3990 | if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) |
3991 | xdp_rxq = &rxq->xdp_rxq_short; |
3992 | else |
3993 | xdp_rxq = &rxq->xdp_rxq_long; |
3994 | |
3995 | xdp_init_buff(xdp: &xdp, PAGE_SIZE, rxq: xdp_rxq); |
3996 | xdp_prepare_buff(xdp: &xdp, hard_start: data, |
3997 | MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM, |
3998 | data_len: rx_bytes, meta_valid: false); |
3999 | |
4000 | ret = mvpp2_run_xdp(port, prog: xdp_prog, xdp: &xdp, pp, stats: &ps); |
4001 | |
4002 | if (ret) { |
4003 | xdp_ret |= ret; |
4004 | err = mvpp2_rx_refill(port, bm_pool, page_pool: pp, pool); |
4005 | if (err) { |
netdev_err(dev: port->dev, format: "failed to refill BM pools\n");
4007 | goto err_drop_frame; |
4008 | } |
4009 | |
4010 | ps.rx_packets++; |
4011 | ps.rx_bytes += rx_bytes; |
4012 | continue; |
4013 | } |
4014 | } |
4015 | |
4016 | skb = build_skb(data, frag_size); |
4017 | if (!skb) { |
netdev_warn(dev: port->dev, format: "skb build failed\n");
4019 | goto err_drop_frame; |
4020 | } |
4021 | |
4022 | /* If we have RX hardware timestamping enabled, grab the |
4023 | * timestamp from the queue and convert. |
4024 | */ |
4025 | if (mvpp22_rx_hwtstamping(port)) { |
4026 | timestamp = le32_to_cpu(rx_desc->pp22.timestamp); |
4027 | mvpp22_tai_tstamp(tai: port->priv->tai, tstamp: timestamp, |
4028 | hwtstamp: skb_hwtstamps(skb)); |
4029 | } |
4030 | |
4031 | err = mvpp2_rx_refill(port, bm_pool, page_pool: pp, pool); |
4032 | if (err) { |
netdev_err(dev: port->dev, format: "failed to refill BM pools\n");
4034 | dev_kfree_skb_any(skb); |
4035 | goto err_drop_frame; |
4036 | } |
4037 | |
4038 | if (pp) |
4039 | skb_mark_for_recycle(skb); |
4040 | else |
4041 | dma_unmap_single_attrs(dev: dev->dev.parent, addr: dma_addr, |
4042 | size: bm_pool->buf_size, dir: DMA_FROM_DEVICE, |
4043 | DMA_ATTR_SKIP_CPU_SYNC); |
4044 | |
4045 | ps.rx_packets++; |
4046 | ps.rx_bytes += rx_bytes; |
4047 | |
4048 | skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); |
4049 | skb_put(skb, len: rx_bytes); |
4050 | skb->ip_summed = mvpp2_rx_csum(port, status: rx_status); |
4051 | skb->protocol = eth_type_trans(skb, dev); |
4052 | |
4053 | napi_gro_receive(napi, skb); |
4054 | continue; |
4055 | |
4056 | err_drop_frame: |
4057 | dev->stats.rx_errors++; |
4058 | mvpp2_rx_error(port, rx_desc); |
4059 | /* Return the buffer to the pool */ |
4060 | if (rx_status & MVPP2_RXD_BUF_HDR) |
4061 | mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); |
4062 | else |
4063 | mvpp2_bm_pool_put(port, pool, buf_dma_addr: dma_addr, buf_phys_addr: phys_addr); |
4064 | } |
4065 | |
4066 | if (xdp_ret & MVPP2_XDP_REDIR) |
4067 | xdp_do_flush(); |
4068 | |
4069 | if (ps.rx_packets) { |
4070 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
4071 | |
4072 | u64_stats_update_begin(syncp: &stats->syncp); |
4073 | stats->rx_packets += ps.rx_packets; |
4074 | stats->rx_bytes += ps.rx_bytes; |
4075 | /* xdp */ |
4076 | stats->xdp_redirect += ps.xdp_redirect; |
4077 | stats->xdp_pass += ps.xdp_pass; |
4078 | stats->xdp_drop += ps.xdp_drop; |
4079 | u64_stats_update_end(syncp: &stats->syncp); |
4080 | } |
4081 | |
4082 | /* Update Rx queue management counters */ |
4083 | wmb(); |
4084 | mvpp2_rxq_status_update(port, rxq_id: rxq->id, used_count: rx_done, free_count: rx_done); |
4085 | |
4086 | return rx_todo; |
4087 | } |
4088 | |
4089 | static inline void |
4090 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
4091 | struct mvpp2_tx_desc *desc) |
4092 | { |
4093 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
4094 | struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
4095 | |
4096 | dma_addr_t buf_dma_addr = |
4097 | mvpp2_txdesc_dma_addr_get(port, tx_desc: desc); |
4098 | size_t buf_sz = |
4099 | mvpp2_txdesc_size_get(port, tx_desc: desc); |
4100 | if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) |
4101 | dma_unmap_single(port->dev->dev.parent, buf_dma_addr, |
4102 | buf_sz, DMA_TO_DEVICE); |
4103 | mvpp2_txq_desc_put(txq); |
4104 | } |
4105 | |
4106 | static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, |
4107 | struct mvpp2_tx_desc *desc) |
4108 | { |
4109 | /* We only need to clear the low bits */ |
4110 | if (port->priv->hw_version >= MVPP22) |
4111 | desc->pp22.ptp_descriptor &= |
4112 | cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); |
4113 | } |
4114 | |
4115 | static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, |
4116 | struct mvpp2_tx_desc *tx_desc, |
4117 | struct sk_buff *skb) |
4118 | { |
4119 | struct mvpp2_hwtstamp_queue *queue; |
4120 | unsigned int mtype, type, i; |
4121 | struct ptp_header *hdr; |
4122 | u64 ptpdesc; |
4123 | |
4124 | if (port->priv->hw_version == MVPP21 || |
4125 | port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) |
4126 | return false; |
4127 | |
4128 | type = ptp_classify_raw(skb); |
4129 | if (!type) |
4130 | return false; |
4131 | |
4132 | hdr = ptp_parse_header(skb, type); |
4133 | if (!hdr) |
4134 | return false; |
4135 | |
4136 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
4137 | |
4138 | ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | |
4139 | MVPP22_PTP_ACTION_CAPTURE; |
4140 | queue = &port->tx_hwtstamp_queue[0]; |
4141 | |
4142 | switch (type & PTP_CLASS_VMASK) { |
4143 | case PTP_CLASS_V1: |
4144 | ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); |
4145 | break; |
4146 | |
4147 | case PTP_CLASS_V2: |
4148 | ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); |
4149 | mtype = hdr->tsmt & 15; |
4150 | /* Direct PTP Sync messages to queue 1 */ |
4151 | if (mtype == 0) { |
4152 | ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; |
4153 | queue = &port->tx_hwtstamp_queue[1]; |
4154 | } |
4155 | break; |
4156 | } |
4157 | |
4158 | /* Take a reference on the skb and insert into our queue */ |
4159 | i = queue->next; |
4160 | queue->next = (i + 1) & 31; |
4161 | if (queue->skb[i]) |
4162 | dev_kfree_skb_any(skb: queue->skb[i]); |
4163 | queue->skb[i] = skb_get(skb); |
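/* The timestamp queue is a 32-entry ring (hence the & 31 wrap above);
 * if the slot still holds an skb whose timestamp never arrived, it is
 * dropped here rather than leaked.
 */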
4164 | |
4165 | ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); |
4166 | |
4167 | /* |
4168 | * 3:0 - PTPAction |
4169 | * 6:4 - PTPPacketFormat |
4170 | * 7 - PTP_CF_WraparoundCheckEn |
4171 | * 9:8 - IngressTimestampSeconds[1:0] |
4172 | * 10 - Reserved |
4173 | * 11 - MACTimestampingEn |
4174 | * 17:12 - PTP_TimestampQueueEntryID[5:0] |
4175 | * 18 - PTPTimestampQueueSelect |
4176 | * 19 - UDPChecksumUpdateEn |
4177 | * 27:20 - TimestampOffset |
4178 | * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header |
4179 | * NTPTs, Y.1731 - L3 to timestamp entry |
4180 | * 35:28 - UDP Checksum Offset |
4181 | * |
4182 | * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) |
4183 | */ |
4184 | tx_desc->pp22.ptp_descriptor &= |
4185 | cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); |
4186 | tx_desc->pp22.ptp_descriptor |= |
4187 | cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); |
4188 | tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); |
4189 | tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); |
4190 | |
4191 | return true; |
4192 | } |
4193 | |
4194 | /* Handle tx fragmentation processing */ |
4195 | static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, |
4196 | struct mvpp2_tx_queue *aggr_txq, |
4197 | struct mvpp2_tx_queue *txq) |
4198 | { |
4199 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
4200 | struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
4201 | struct mvpp2_tx_desc *tx_desc; |
4202 | int i; |
4203 | dma_addr_t buf_dma_addr; |
4204 | |
4205 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
4206 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4207 | void *addr = skb_frag_address(frag); |
4208 | |
4209 | tx_desc = mvpp2_txq_next_desc_get(txq: aggr_txq); |
4210 | mvpp2_txdesc_clear_ptp(port, desc: tx_desc); |
4211 | mvpp2_txdesc_txq_set(port, tx_desc, txq: txq->id); |
4212 | mvpp2_txdesc_size_set(port, tx_desc, size: skb_frag_size(frag)); |
4213 | |
4214 | buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, |
4215 | skb_frag_size(frag), |
4216 | DMA_TO_DEVICE); |
4217 | if (dma_mapping_error(dev: port->dev->dev.parent, dma_addr: buf_dma_addr)) { |
4218 | mvpp2_txq_desc_put(txq); |
4219 | goto cleanup; |
4220 | } |
4221 | |
4222 | mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr: buf_dma_addr); |
4223 | |
4224 | if (i == (skb_shinfo(skb)->nr_frags - 1)) { |
4225 | /* Last descriptor */ |
4226 | mvpp2_txdesc_cmd_set(port, tx_desc, |
4227 | MVPP2_TXD_L_DESC); |
4228 | mvpp2_txq_inc_put(port, txq_pcpu, data: skb, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4229 | } else { |
4230 | /* Descriptor in the middle: Not First, Not Last */ |
4231 | mvpp2_txdesc_cmd_set(port, tx_desc, command: 0); |
4232 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4233 | } |
4234 | } |
4235 | |
4236 | return 0; |
4237 | cleanup: |
4238 | /* Release all descriptors that were used to map fragments of |
4239 | * this packet, as well as the corresponding DMA mappings |
4240 | */ |
4241 | for (i = i - 1; i >= 0; i--) { |
4242 | tx_desc = txq->descs + i; |
4243 | tx_desc_unmap_put(port, txq, desc: tx_desc); |
4244 | } |
4245 | |
4246 | return -ENOMEM; |
4247 | } |
4248 | |
4249 | static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, |
4250 | struct net_device *dev, |
4251 | struct mvpp2_tx_queue *txq, |
4252 | struct mvpp2_tx_queue *aggr_txq, |
4253 | struct mvpp2_txq_pcpu *txq_pcpu, |
4254 | int hdr_sz) |
4255 | { |
4256 | struct mvpp2_port *port = netdev_priv(dev); |
4257 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(txq: aggr_txq); |
4258 | dma_addr_t addr; |
4259 | |
4260 | mvpp2_txdesc_clear_ptp(port, desc: tx_desc); |
4261 | mvpp2_txdesc_txq_set(port, tx_desc, txq: txq->id); |
4262 | mvpp2_txdesc_size_set(port, tx_desc, size: hdr_sz); |
4263 | |
4264 | addr = txq_pcpu->tso_headers_dma + |
4265 | txq_pcpu->txq_put_index * TSO_HEADER_SIZE; |
4266 | mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr: addr); |
4267 | |
4268 | mvpp2_txdesc_cmd_set(port, tx_desc, command: mvpp2_skb_tx_csum(port, skb) | |
4269 | MVPP2_TXD_F_DESC | |
4270 | MVPP2_TXD_PADDING_DISABLE); |
4271 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4272 | } |
4273 | |
4274 | static inline int mvpp2_tso_put_data(struct sk_buff *skb, |
4275 | struct net_device *dev, struct tso_t *tso, |
4276 | struct mvpp2_tx_queue *txq, |
4277 | struct mvpp2_tx_queue *aggr_txq, |
4278 | struct mvpp2_txq_pcpu *txq_pcpu, |
4279 | int sz, bool left, bool last) |
4280 | { |
4281 | struct mvpp2_port *port = netdev_priv(dev); |
4282 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(txq: aggr_txq); |
4283 | dma_addr_t buf_dma_addr; |
4284 | |
4285 | mvpp2_txdesc_clear_ptp(port, desc: tx_desc); |
4286 | mvpp2_txdesc_txq_set(port, tx_desc, txq: txq->id); |
4287 | mvpp2_txdesc_size_set(port, tx_desc, size: sz); |
4288 | |
4289 | buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, |
4290 | DMA_TO_DEVICE); |
4291 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { |
4292 | mvpp2_txq_desc_put(txq); |
4293 | return -ENOMEM; |
4294 | } |
4295 | |
4296 | mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr: buf_dma_addr); |
4297 | |
4298 | if (!left) { |
4299 | mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); |
4300 | if (last) { |
4301 | mvpp2_txq_inc_put(port, txq_pcpu, data: skb, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4302 | return 0; |
4303 | } |
4304 | } else { |
4305 | mvpp2_txdesc_cmd_set(port, tx_desc, command: 0); |
4306 | } |
4307 | |
4308 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4309 | return 0; |
4310 | } |
4311 | |
4312 | static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, |
4313 | struct mvpp2_tx_queue *txq, |
4314 | struct mvpp2_tx_queue *aggr_txq, |
4315 | struct mvpp2_txq_pcpu *txq_pcpu) |
4316 | { |
4317 | struct mvpp2_port *port = netdev_priv(dev); |
4318 | int hdr_sz, i, len, descs = 0; |
4319 | struct tso_t tso; |
4320 | |
4321 | /* Check number of available descriptors */ |
4322 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, num: tso_count_descs(skb)) || |
4323 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, |
4324 | num: tso_count_descs(skb))) |
4325 | return 0; |
4326 | |
4327 | hdr_sz = tso_start(skb, tso: &tso); |
4328 | |
4329 | len = skb->len - hdr_sz; |
4330 | while (len > 0) { |
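/* Outer loop: carve the payload into gso_size-sized segments, each
 * preceded by a header built in the per-CPU TSO header buffer. The
 * inner loop below maps each segment, which may span several source
 * fragments.
 */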
4331 | int left = min_t(int, skb_shinfo(skb)->gso_size, len); |
4332 | char *hdr = txq_pcpu->tso_headers + |
4333 | txq_pcpu->txq_put_index * TSO_HEADER_SIZE; |
4334 | |
4335 | len -= left; |
4336 | descs++; |
4337 | |
4338 | tso_build_hdr(skb, hdr, tso: &tso, size: left, is_last: len == 0); |
4339 | mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); |
4340 | |
4341 | while (left > 0) { |
4342 | int sz = min_t(int, tso.size, left); |
4343 | left -= sz; |
4344 | descs++; |
4345 | |
4346 | if (mvpp2_tso_put_data(skb, dev, tso: &tso, txq, aggr_txq, |
4347 | txq_pcpu, sz, left, last: len == 0)) |
4348 | goto release; |
4349 | tso_build_data(skb, tso: &tso, size: sz); |
4350 | } |
4351 | } |
4352 | |
4353 | return descs; |
4354 | |
4355 | release: |
4356 | for (i = descs - 1; i >= 0; i--) { |
4357 | struct mvpp2_tx_desc *tx_desc = txq->descs + i; |
4358 | tx_desc_unmap_put(port, txq, desc: tx_desc); |
4359 | } |
4360 | return 0; |
4361 | } |
4362 | |
4363 | /* Main tx processing */ |
4364 | static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) |
4365 | { |
4366 | struct mvpp2_port *port = netdev_priv(dev); |
4367 | struct mvpp2_tx_queue *txq, *aggr_txq; |
4368 | struct mvpp2_txq_pcpu *txq_pcpu; |
4369 | struct mvpp2_tx_desc *tx_desc; |
4370 | dma_addr_t buf_dma_addr; |
4371 | unsigned long flags = 0; |
4372 | unsigned int thread; |
4373 | int frags = 0; |
4374 | u16 txq_id; |
4375 | u32 tx_cmd; |
4376 | |
4377 | thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
4378 | |
4379 | txq_id = skb_get_queue_mapping(skb); |
4380 | txq = port->txqs[txq_id]; |
4381 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
4382 | aggr_txq = &port->priv->aggr_txqs[thread]; |
4383 | |
4384 | if (test_bit(thread, &port->priv->lock_map)) |
4385 | spin_lock_irqsave(&port->tx_lock[thread], flags); |
4386 | |
4387 | if (skb_is_gso(skb)) { |
4388 | frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); |
4389 | goto out; |
4390 | } |
4391 | frags = skb_shinfo(skb)->nr_frags + 1; |
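/* The +1 accounts for the linear part of the skb, mapped separately
 * from the page fragments.
 */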
4392 | |
4393 | /* Check number of available descriptors */ |
4394 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, num: frags) || |
4395 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, num: frags)) { |
4396 | frags = 0; |
4397 | goto out; |
4398 | } |
4399 | |
4400 | /* Get a descriptor for the first part of the packet */ |
4401 | tx_desc = mvpp2_txq_next_desc_get(txq: aggr_txq); |
4402 | if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || |
4403 | !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) |
4404 | mvpp2_txdesc_clear_ptp(port, desc: tx_desc); |
4405 | mvpp2_txdesc_txq_set(port, tx_desc, txq: txq->id); |
4406 | mvpp2_txdesc_size_set(port, tx_desc, size: skb_headlen(skb)); |
4407 | |
4408 | buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, |
4409 | skb_headlen(skb), DMA_TO_DEVICE); |
4410 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { |
4411 | mvpp2_txq_desc_put(txq); |
4412 | frags = 0; |
4413 | goto out; |
4414 | } |
4415 | |
4416 | mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr: buf_dma_addr); |
4417 | |
4418 | tx_cmd = mvpp2_skb_tx_csum(port, skb); |
4419 | |
4420 | if (frags == 1) { |
4421 | /* First and Last descriptor */ |
4422 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; |
4423 | mvpp2_txdesc_cmd_set(port, tx_desc, command: tx_cmd); |
4424 | mvpp2_txq_inc_put(port, txq_pcpu, data: skb, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4425 | } else { |
4426 | /* First but not Last */ |
4427 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; |
4428 | mvpp2_txdesc_cmd_set(port, tx_desc, command: tx_cmd); |
4429 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, buf_type: MVPP2_TYPE_SKB); |
4430 | |
4431 | /* Continue with other skb fragments */ |
4432 | if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { |
4433 | tx_desc_unmap_put(port, txq, desc: tx_desc); |
4434 | frags = 0; |
4435 | } |
4436 | } |
4437 | |
4438 | out: |
4439 | if (frags > 0) { |
4440 | struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); |
4441 | struct netdev_queue *nq = netdev_get_tx_queue(dev, index: txq_id); |
4442 | |
4443 | txq_pcpu->reserved_num -= frags; |
4444 | txq_pcpu->count += frags; |
4445 | aggr_txq->count += frags; |
4446 | |
4447 | /* Enable transmit */ |
4448 | wmb(); |
4449 | mvpp2_aggr_txq_pend_desc_add(port, pending: frags); |
4450 | |
4451 | if (txq_pcpu->count >= txq_pcpu->stop_threshold) |
4452 | netif_tx_stop_queue(dev_queue: nq); |
4453 | |
4454 | u64_stats_update_begin(syncp: &stats->syncp); |
4455 | stats->tx_packets++; |
4456 | stats->tx_bytes += skb->len; |
4457 | u64_stats_update_end(syncp: &stats->syncp); |
4458 | } else { |
4459 | dev->stats.tx_dropped++; |
4460 | dev_kfree_skb_any(skb); |
4461 | } |
4462 | |
4463 | /* Finalize TX processing */ |
4464 | if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) |
4465 | mvpp2_txq_done(port, txq, txq_pcpu); |
4466 | |
4467 | /* Set the timer in case not all frags were processed */ |
4468 | if (!port->has_tx_irqs && txq_pcpu->count <= frags && |
4469 | txq_pcpu->count > 0) { |
4470 | struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); |
4471 | |
4472 | if (!port_pcpu->timer_scheduled) { |
4473 | port_pcpu->timer_scheduled = true; |
4474 | hrtimer_start(timer: &port_pcpu->tx_done_timer, |
4475 | MVPP2_TXDONE_HRTIMER_PERIOD_NS, |
4476 | mode: HRTIMER_MODE_REL_PINNED_SOFT); |
4477 | } |
4478 | } |
4479 | |
4480 | if (test_bit(thread, &port->priv->lock_map)) |
4481 | spin_unlock_irqrestore(lock: &port->tx_lock[thread], flags); |
4482 | |
4483 | return NETDEV_TX_OK; |
4484 | } |
4485 | |
4486 | static inline void mvpp2_cause_error(struct net_device *dev, int cause) |
4487 | { |
if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
netdev_err(dev, format: "FCS error\n");
if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
netdev_err(dev, format: "rx fifo overrun error\n");
if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
netdev_err(dev, format: "tx fifo underrun error\n");
4494 | } |
4495 | |
4496 | static int mvpp2_poll(struct napi_struct *napi, int budget) |
4497 | { |
4498 | u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; |
4499 | int rx_done = 0; |
4500 | struct mvpp2_port *port = netdev_priv(dev: napi->dev); |
4501 | struct mvpp2_queue_vector *qv; |
4502 | unsigned int thread = mvpp2_cpu_to_thread(priv: port->priv, smp_processor_id()); |
4503 | |
4504 | qv = container_of(napi, struct mvpp2_queue_vector, napi); |
4505 | |
4506 | /* Rx/Tx cause register |
4507 | * |
4508 | * Bits 0-15: each bit indicates received packets on the Rx queue |
4509 | * (bit 0 is for Rx queue 0). |
4510 | * |
4511 | * Bits 16-23: each bit indicates transmitted packets on the Tx queue |
4512 | * (bit 16 is for Tx queue 0). |
4513 | * |
4514 | * Each CPU has its own Rx/Tx cause register |
4515 | */ |
4516 | cause_rx_tx = mvpp2_thread_read_relaxed(priv: port->priv, thread: qv->sw_thread_id, |
4517 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); |
4518 | |
4519 | cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; |
4520 | if (cause_misc) { |
4521 | mvpp2_cause_error(dev: port->dev, cause: cause_misc); |
4522 | |
4523 | /* Clear the cause register */ |
4524 | mvpp2_write(priv: port->priv, MVPP2_ISR_MISC_CAUSE_REG, data: 0); |
4525 | mvpp2_thread_write(priv: port->priv, thread, |
4526 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id), |
4527 | data: cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); |
4528 | } |
4529 | |
4530 | if (port->has_tx_irqs) { |
4531 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
4532 | if (cause_tx) { |
4533 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; |
4534 | mvpp2_tx_done(port, cause: cause_tx, thread: qv->sw_thread_id); |
4535 | } |
4536 | } |
4537 | |
4538 | /* Process RX packets */ |
4539 | cause_rx = cause_rx_tx & |
4540 | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); |
4541 | cause_rx <<= qv->first_rxq; |
4542 | cause_rx |= qv->pending_cause_rx; |
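/* Shift the per-vector cause bits up to the port's absolute rxq
 * numbering and merge in any queues left unserviced by the previous
 * poll.
 */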
4543 | while (cause_rx && budget > 0) { |
4544 | int count; |
4545 | struct mvpp2_rx_queue *rxq; |
4546 | |
4547 | rxq = mvpp2_get_rx_queue(port, cause: cause_rx); |
4548 | if (!rxq) |
4549 | break; |
4550 | |
4551 | count = mvpp2_rx(port, napi, rx_todo: budget, rxq); |
4552 | rx_done += count; |
4553 | budget -= count; |
4554 | if (budget > 0) { |
4555 | /* Clear the bit associated to this Rx queue |
4556 | * so that next iteration will continue from |
4557 | * the next Rx queue. |
4558 | */ |
4559 | cause_rx &= ~(1 << rxq->logic_rxq); |
4560 | } |
4561 | } |
4562 | |
4563 | if (budget > 0) { |
4564 | cause_rx = 0; |
4565 | napi_complete_done(n: napi, work_done: rx_done); |
4566 | |
4567 | mvpp2_qvec_interrupt_enable(qvec: qv); |
4568 | } |
4569 | qv->pending_cause_rx = cause_rx; |
4570 | return rx_done; |
4571 | } |
4572 | |
4573 | static void mvpp22_mode_reconfigure(struct mvpp2_port *port, |
4574 | phy_interface_t interface) |
4575 | { |
4576 | u32 ctrl3; |
4577 | |
4578 | /* Set the GMAC & XLG MAC in reset */ |
4579 | mvpp2_mac_reset_assert(port); |
4580 | |
4581 | /* Set the MPCS and XPCS in reset */ |
4582 | mvpp22_pcs_reset_assert(port); |
4583 | |
4584 | /* comphy reconfiguration */ |
4585 | mvpp22_comphy_init(port, interface); |
4586 | |
4587 | /* gop reconfiguration */ |
4588 | mvpp22_gop_init(port, interface); |
4589 | |
4590 | mvpp22_pcs_reset_deassert(port, interface); |
4591 | |
4592 | if (mvpp2_port_supports_xlg(port)) { |
4593 | ctrl3 = readl(addr: port->base + MVPP22_XLG_CTRL3_REG); |
4594 | ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; |
4595 | |
4596 | if (mvpp2_is_xlg(interface)) |
4597 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; |
4598 | else |
4599 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; |
4600 | |
4601 | writel(val: ctrl3, addr: port->base + MVPP22_XLG_CTRL3_REG); |
4602 | } |
4603 | |
4604 | if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface)) |
4605 | mvpp2_xlg_max_rx_size_set(port); |
4606 | else |
4607 | mvpp2_gmac_max_rx_size_set(port); |
4608 | } |
4609 | |
4610 | /* Set hw internals when starting port */ |
4611 | static void mvpp2_start_dev(struct mvpp2_port *port) |
4612 | { |
4613 | int i; |
4614 | |
4615 | mvpp2_txp_max_tx_size_set(port); |
4616 | |
4617 | for (i = 0; i < port->nqvecs; i++) |
4618 | napi_enable(n: &port->qvecs[i].napi); |
4619 | |
4620 | /* Enable interrupts on all threads */ |
4621 | mvpp2_interrupts_enable(port); |
4622 | |
4623 | if (port->priv->hw_version >= MVPP22) |
4624 | mvpp22_mode_reconfigure(port, interface: port->phy_interface); |
4625 | |
4626 | if (port->phylink) { |
4627 | phylink_start(port->phylink); |
4628 | } else { |
4629 | mvpp2_acpi_start(port); |
4630 | } |
4631 | |
4632 | netif_tx_start_all_queues(dev: port->dev); |
4633 | |
4634 | clear_bit(nr: 0, addr: &port->state); |
4635 | } |
4636 | |
4637 | /* Set hw internals when stopping port */ |
4638 | static void mvpp2_stop_dev(struct mvpp2_port *port) |
4639 | { |
4640 | int i; |
4641 | |
4642 | set_bit(nr: 0, addr: &port->state); |
4643 | |
4644 | /* Disable interrupts on all threads */ |
4645 | mvpp2_interrupts_disable(port); |
4646 | |
4647 | for (i = 0; i < port->nqvecs; i++) |
4648 | napi_disable(n: &port->qvecs[i].napi); |
4649 | |
4650 | if (port->phylink) |
4651 | phylink_stop(port->phylink); |
4652 | phy_power_off(phy: port->comphy); |
4653 | } |
4654 | |
4655 | static int mvpp2_check_ringparam_valid(struct net_device *dev, |
4656 | struct ethtool_ringparam *ring) |
4657 | { |
4658 | u16 new_rx_pending = ring->rx_pending; |
4659 | u16 new_tx_pending = ring->tx_pending; |
4660 | |
4661 | if (ring->rx_pending == 0 || ring->tx_pending == 0) |
4662 | return -EINVAL; |
4663 | |
4664 | if (ring->rx_pending > MVPP2_MAX_RXD_MAX) |
4665 | new_rx_pending = MVPP2_MAX_RXD_MAX; |
4666 | else if (ring->rx_pending < MSS_THRESHOLD_START) |
4667 | new_rx_pending = MSS_THRESHOLD_START; |
4668 | else if (!IS_ALIGNED(ring->rx_pending, 16)) |
4669 | new_rx_pending = ALIGN(ring->rx_pending, 16); |
4670 | |
4671 | if (ring->tx_pending > MVPP2_MAX_TXD_MAX) |
4672 | new_tx_pending = MVPP2_MAX_TXD_MAX; |
4673 | else if (!IS_ALIGNED(ring->tx_pending, 32)) |
4674 | new_tx_pending = ALIGN(ring->tx_pending, 32); |
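/* E.g. a request for 100 Tx descriptors is rounded up to
 * ALIGN(100, 32) = 128.
 */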
4675 | |
4676 | /* The Tx ring size cannot be smaller than the minimum number of |
4677 | * descriptors needed for TSO. |
4678 | */ |
4679 | if (new_tx_pending < MVPP2_MAX_SKB_DESCS) |
4680 | new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); |
4681 | |
4682 | if (ring->rx_pending != new_rx_pending) { |
4683 | netdev_info(dev, format: "illegal Rx ring size value %d, round to %d\n" , |
4684 | ring->rx_pending, new_rx_pending); |
4685 | ring->rx_pending = new_rx_pending; |
4686 | } |
4687 | |
4688 | if (ring->tx_pending != new_tx_pending) { |
4689 | netdev_info(dev, format: "illegal Tx ring size value %d, round to %d\n" , |
4690 | ring->tx_pending, new_tx_pending); |
4691 | ring->tx_pending = new_tx_pending; |
4692 | } |
4693 | |
4694 | return 0; |
4695 | } |
4696 | |
4697 | static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) |
4698 | { |
4699 | u32 mac_addr_l, mac_addr_m, mac_addr_h; |
4700 | |
4701 | mac_addr_l = readl(addr: port->base + MVPP2_GMAC_CTRL_1_REG); |
4702 | mac_addr_m = readl(addr: port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); |
4703 | mac_addr_h = readl(addr: port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); |
4704 | addr[0] = (mac_addr_h >> 24) & 0xFF; |
4705 | addr[1] = (mac_addr_h >> 16) & 0xFF; |
4706 | addr[2] = (mac_addr_h >> 8) & 0xFF; |
4707 | addr[3] = mac_addr_h & 0xFF; |
4708 | addr[4] = mac_addr_m & 0xFF; |
4709 | addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; |
4710 | } |
4711 | |
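/* Request one IRQ per queue vector. Private (per-thread) vectors are
 * flagged IRQ_NO_BALANCING and given an affinity hint covering the CPUs
 * that map onto their software thread, keeping completion work local.
 */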
4712 | static int mvpp2_irqs_init(struct mvpp2_port *port) |
4713 | { |
4714 | int err, i; |
4715 | |
4716 | for (i = 0; i < port->nqvecs; i++) { |
4717 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
4718 | |
4719 | if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
4720 | qv->mask = kzalloc(size: cpumask_size(), GFP_KERNEL); |
4721 | if (!qv->mask) { |
4722 | err = -ENOMEM; |
4723 | goto err; |
4724 | } |
4725 | |
4726 | irq_set_status_flags(irq: qv->irq, set: IRQ_NO_BALANCING); |
4727 | } |
4728 | |
4729 | err = request_irq(irq: qv->irq, handler: mvpp2_isr, flags: 0, name: port->dev->name, dev: qv); |
4730 | if (err) |
4731 | goto err; |
4732 | |
4733 | if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
4734 | unsigned int cpu; |
4735 | |
4736 | for_each_present_cpu(cpu) { |
4737 | if (mvpp2_cpu_to_thread(priv: port->priv, cpu) == |
4738 | qv->sw_thread_id) |
4739 | cpumask_set_cpu(cpu, dstp: qv->mask); |
4740 | } |
4741 | |
4742 | irq_set_affinity_hint(irq: qv->irq, m: qv->mask); |
4743 | } |
4744 | } |
4745 | |
4746 | return 0; |
4747 | err: |
4748 | for (i = 0; i < port->nqvecs; i++) { |
4749 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
4750 | |
4751 | irq_set_affinity_hint(irq: qv->irq, NULL); |
4752 | kfree(objp: qv->mask); |
4753 | qv->mask = NULL; |
4754 | free_irq(qv->irq, qv); |
4755 | } |
4756 | |
4757 | return err; |
4758 | } |
4759 | |
4760 | static void mvpp2_irqs_deinit(struct mvpp2_port *port) |
4761 | { |
4762 | int i; |
4763 | |
4764 | for (i = 0; i < port->nqvecs; i++) { |
4765 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
4766 | |
4767 | irq_set_affinity_hint(irq: qv->irq, NULL); |
4768 | kfree(objp: qv->mask); |
4769 | qv->mask = NULL; |
4770 | irq_clear_status_flags(irq: qv->irq, clr: IRQ_NO_BALANCING); |
4771 | free_irq(qv->irq, qv); |
4772 | } |
4773 | } |
4774 | |
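/* RSS is only usable when each port gets its own set of Rx queues, i.e. in
 * multi queue mode, and is never used on loopback ports.
 */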
4775 | static bool mvpp22_rss_is_supported(struct mvpp2_port *port) |
4776 | { |
4777 | return (queue_mode == MVPP2_QDIST_MULTI_MODE) && |
4778 | !(port->flags & MVPP2_F_LOOPBACK); |
4779 | } |
4780 | |
4781 | static int mvpp2_open(struct net_device *dev) |
4782 | { |
4783 | struct mvpp2_port *port = netdev_priv(dev); |
4784 | struct mvpp2 *priv = port->priv; |
4785 | unsigned char mac_bcast[ETH_ALEN] = { |
4786 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; |
4787 | bool valid = false; |
4788 | int err; |
4789 | |
4790 | err = mvpp2_prs_mac_da_accept(port, da: mac_bcast, add: true); |
4791 | if (err) { |
4792 | netdev_err(dev, format: "mvpp2_prs_mac_da_accept BC failed\n" ); |
4793 | return err; |
4794 | } |
4795 | err = mvpp2_prs_mac_da_accept(port, da: dev->dev_addr, add: true); |
4796 | if (err) { |
4797 | netdev_err(dev, format: "mvpp2_prs_mac_da_accept own addr failed\n" ); |
4798 | return err; |
4799 | } |
4800 | err = mvpp2_prs_tag_mode_set(priv: port->priv, port: port->id, type: MVPP2_TAG_TYPE_MH); |
4801 | if (err) { |
4802 | netdev_err(dev, format: "mvpp2_prs_tag_mode_set failed\n" ); |
4803 | return err; |
4804 | } |
4805 | err = mvpp2_prs_def_flow(port); |
4806 | if (err) { |
4807 | netdev_err(dev, format: "mvpp2_prs_def_flow failed\n" ); |
4808 | return err; |
4809 | } |
4810 | |
4811 | /* Allocate the Rx/Tx queues */ |
4812 | err = mvpp2_setup_rxqs(port); |
4813 | if (err) { |
4814 | netdev_err(dev: port->dev, format: "cannot allocate Rx queues\n" ); |
4815 | return err; |
4816 | } |
4817 | |
4818 | err = mvpp2_setup_txqs(port); |
4819 | if (err) { |
4820 | netdev_err(dev: port->dev, format: "cannot allocate Tx queues\n" ); |
4821 | goto err_cleanup_rxqs; |
4822 | } |
4823 | |
4824 | err = mvpp2_irqs_init(port); |
4825 | if (err) { |
4826 | netdev_err(dev: port->dev, format: "cannot init IRQs\n" ); |
4827 | goto err_cleanup_txqs; |
4828 | } |
4829 | |
4830 | if (port->phylink) { |
4831 | err = phylink_fwnode_phy_connect(pl: port->phylink, fwnode: port->fwnode, flags: 0); |
4832 | if (err) { |
4833 | netdev_err(dev: port->dev, format: "could not attach PHY (%d)\n" , |
4834 | err); |
4835 | goto err_free_irq; |
4836 | } |
4837 | |
4838 | valid = true; |
4839 | } |
4840 | |
4841 | if (priv->hw_version >= MVPP22 && port->port_irq) { |
4842 | err = request_irq(irq: port->port_irq, handler: mvpp2_port_isr, flags: 0, |
4843 | name: dev->name, dev: port); |
4844 | if (err) { |
4845 | netdev_err(dev: port->dev, |
4846 | format: "cannot request port link/ptp IRQ %d\n" , |
4847 | port->port_irq); |
4848 | goto err_free_irq; |
4849 | } |
4850 | |
4851 | mvpp22_gop_setup_irq(port); |
4852 | |
4853 | /* The link is down by default */ |
4854 | netif_carrier_off(dev: port->dev); |
4855 | |
4856 | valid = true; |
4857 | } else { |
4858 | port->port_irq = 0; |
4859 | } |
4860 | |
4861 | if (!valid) { |
4862 | netdev_err(dev: port->dev, |
4863 | format: "invalid configuration: no dt or link IRQ" ); |
4864 | err = -ENOENT; |
4865 | goto err_free_irq; |
4866 | } |
4867 | |
4868 | /* Unmask interrupts on all CPUs */ |
4869 | on_each_cpu(func: mvpp2_interrupts_unmask, info: port, wait: 1); |
4870 | mvpp2_shared_interrupt_mask_unmask(port, mask: false); |
4871 | |
4872 | mvpp2_start_dev(port); |
4873 | |
4874 | /* Start hardware statistics gathering */ |
4875 | queue_delayed_work(wq: priv->stats_queue, dwork: &port->stats_work, |
4876 | MVPP2_MIB_COUNTERS_STATS_DELAY); |
4877 | |
4878 | return 0; |
4879 | |
4880 | err_free_irq: |
4881 | mvpp2_irqs_deinit(port); |
4882 | err_cleanup_txqs: |
4883 | mvpp2_cleanup_txqs(port); |
4884 | err_cleanup_rxqs: |
4885 | mvpp2_cleanup_rxqs(port); |
4886 | return err; |
4887 | } |
4888 | |
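/* ndo_stop: undo mvpp2_open(). Stop the datapath, mask and free the IRQs,
 * cancel pending tx-done hrtimers when Tx IRQs are not available, release
 * the queues and put the MAC and PCS back into reset.
 */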
4889 | static int mvpp2_stop(struct net_device *dev) |
4890 | { |
4891 | struct mvpp2_port *port = netdev_priv(dev); |
4892 | struct mvpp2_port_pcpu *port_pcpu; |
4893 | unsigned int thread; |
4894 | |
4895 | mvpp2_stop_dev(port); |
4896 | |
4897 | /* Mask interrupts on all threads */ |
4898 | on_each_cpu(func: mvpp2_interrupts_mask, info: port, wait: 1); |
4899 | mvpp2_shared_interrupt_mask_unmask(port, mask: true); |
4900 | |
4901 | if (port->phylink) |
4902 | phylink_disconnect_phy(port->phylink); |
4903 | if (port->port_irq) |
4904 | free_irq(port->port_irq, port); |
4905 | |
4906 | mvpp2_irqs_deinit(port); |
4907 | if (!port->has_tx_irqs) { |
4908 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
4909 | port_pcpu = per_cpu_ptr(port->pcpu, thread); |
4910 | |
4911 | hrtimer_cancel(timer: &port_pcpu->tx_done_timer); |
4912 | port_pcpu->timer_scheduled = false; |
4913 | } |
4914 | } |
4915 | mvpp2_cleanup_rxqs(port); |
4916 | mvpp2_cleanup_txqs(port); |
4917 | |
4918 | cancel_delayed_work_sync(dwork: &port->stats_work); |
4919 | |
4920 | mvpp2_mac_reset_assert(port); |
4921 | mvpp22_pcs_reset_assert(port); |
4922 | |
4923 | return 0; |
4924 | } |
4925 | |
4926 | static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, |
4927 | struct netdev_hw_addr_list *list) |
4928 | { |
4929 | struct netdev_hw_addr *ha; |
4930 | int ret; |
4931 | |
4932 | netdev_hw_addr_list_for_each(ha, list) { |
4933 | ret = mvpp2_prs_mac_da_accept(port, da: ha->addr, add: true); |
4934 | if (ret) |
4935 | return ret; |
4936 | } |
4937 | |
4938 | return 0; |
4939 | } |
4940 | |
4941 | static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) |
4942 | { |
4943 | if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
4944 | mvpp2_prs_vid_enable_filtering(port); |
4945 | else |
4946 | mvpp2_prs_vid_disable_filtering(port); |
4947 | |
4948 | mvpp2_prs_mac_promisc_set(priv: port->priv, port: port->id, |
4949 | l2_cast: MVPP2_PRS_L2_UNI_CAST, add: enable); |
4950 | |
4951 | mvpp2_prs_mac_promisc_set(priv: port->priv, port: port->id, |
4952 | l2_cast: MVPP2_PRS_L2_MULTI_CAST, add: enable); |
4953 | } |
4954 | |
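/* Rebuild the parser MAC filters from scratch: flush the UC/MC lists and
 * re-add the current entries, falling back to unicast or multicast
 * promiscuous mode whenever a list exceeds what the parser can filter.
 */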
4955 | static void mvpp2_set_rx_mode(struct net_device *dev) |
4956 | { |
4957 | struct mvpp2_port *port = netdev_priv(dev); |
4958 | |
4959 | /* Clear the whole UC and MC list */ |
4960 | mvpp2_prs_mac_del_all(port); |
4961 | |
4962 | if (dev->flags & IFF_PROMISC) { |
4963 | mvpp2_set_rx_promisc(port, enable: true); |
4964 | return; |
4965 | } |
4966 | |
4967 | mvpp2_set_rx_promisc(port, enable: false); |
4968 | |
4969 | if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || |
4970 | mvpp2_prs_mac_da_accept_list(port, list: &dev->uc)) |
4971 | mvpp2_prs_mac_promisc_set(priv: port->priv, port: port->id, |
4972 | l2_cast: MVPP2_PRS_L2_UNI_CAST, add: true); |
4973 | |
4974 | if (dev->flags & IFF_ALLMULTI) { |
4975 | mvpp2_prs_mac_promisc_set(priv: port->priv, port: port->id, |
4976 | l2_cast: MVPP2_PRS_L2_MULTI_CAST, add: true); |
4977 | return; |
4978 | } |
4979 | |
4980 | if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || |
4981 | mvpp2_prs_mac_da_accept_list(port, list: &dev->mc)) |
4982 | mvpp2_prs_mac_promisc_set(priv: port->priv, port: port->id, |
4983 | l2_cast: MVPP2_PRS_L2_MULTI_CAST, add: true); |
4984 | } |
4985 | |
4986 | static int mvpp2_set_mac_address(struct net_device *dev, void *p) |
4987 | { |
4988 | const struct sockaddr *addr = p; |
4989 | int err; |
4990 | |
4991 | if (!is_valid_ether_addr(addr: addr->sa_data)) |
4992 | return -EADDRNOTAVAIL; |
4993 | |
4994 | err = mvpp2_prs_update_mac_da(dev, da: addr->sa_data); |
4995 | if (err) { |
4996 | /* Reconfigure the parser to accept the original MAC address */ |
4997 | mvpp2_prs_update_mac_da(dev, da: dev->dev_addr); |
4998 | netdev_err(dev, format: "failed to change MAC address\n" ); |
4999 | } |
5000 | return err; |
5001 | } |
5002 | |
5003 | /* Shut down all the ports, reconfigure the pools as percpu or shared, |
5004 | * then bring all the ports back up. |
5005 | */ |
5006 | static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) |
5007 | { |
5008 | bool change_percpu = (percpu != priv->percpu_pools); |
5009 | int numbufs = MVPP2_BM_POOLS_NUM, i; |
5010 | struct mvpp2_port *port = NULL; |
5011 | bool status[MVPP2_MAX_PORTS]; |
5012 | |
5013 | for (i = 0; i < priv->port_count; i++) { |
5014 | port = priv->port_list[i]; |
5015 | status[i] = netif_running(dev: port->dev); |
5016 | if (status[i]) |
5017 | mvpp2_stop(dev: port->dev); |
5018 | } |
5019 | |
5020 | /* nrxqs is the same for all ports */ |
5021 | if (priv->percpu_pools) |
5022 | numbufs = port->nrxqs * 2; |
5023 | |
5024 | if (change_percpu) |
5025 | mvpp2_bm_pool_update_priv_fc(priv, en: false); |
5026 | |
5027 | for (i = 0; i < numbufs; i++) |
5028 | mvpp2_bm_pool_destroy(dev: port->dev->dev.parent, priv, bm_pool: &priv->bm_pools[i]); |
5029 | |
5030 | devm_kfree(dev: port->dev->dev.parent, p: priv->bm_pools); |
5031 | priv->percpu_pools = percpu; |
5032 | mvpp2_bm_init(dev: port->dev->dev.parent, priv); |
5033 | |
5034 | for (i = 0; i < priv->port_count; i++) { |
5035 | port = priv->port_list[i]; |
5036 | if (percpu && port->ntxqs >= num_possible_cpus() * 2) |
5037 | xdp_set_features_flag(dev: port->dev, |
5038 | val: NETDEV_XDP_ACT_BASIC | |
5039 | NETDEV_XDP_ACT_REDIRECT | |
5040 | NETDEV_XDP_ACT_NDO_XMIT); |
5041 | else |
5042 | xdp_clear_features_flag(dev: port->dev); |
5043 | |
5044 | mvpp2_swf_bm_pool_init(port); |
5045 | if (status[i]) |
5046 | mvpp2_open(dev: port->dev); |
5047 | } |
5048 | |
5049 | if (change_percpu) |
5050 | mvpp2_bm_pool_update_priv_fc(priv, en: true); |
5051 | |
5052 | return 0; |
5053 | } |
5054 | |
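/* ndo_change_mtu. The resulting Rx packet size must be a multiple of 8
 * bytes, so the MTU may be rounded. Crossing MVPP2_BM_LONG_PKT_SIZE in
 * either direction can also switch the BM between shared and per-CPU
 * pools, which briefly restarts every port.
 */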
5055 | static int mvpp2_change_mtu(struct net_device *dev, int mtu) |
5056 | { |
5057 | struct mvpp2_port *port = netdev_priv(dev); |
5058 | bool running = netif_running(dev); |
5059 | struct mvpp2 *priv = port->priv; |
5060 | int err; |
5061 | |
5062 | if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { |
5063 | netdev_info(dev, format: "illegal MTU value %d, round to %d\n" , mtu, |
5064 | ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); |
5065 | mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); |
5066 | } |
5067 | |
5068 | if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { |
5069 | netdev_err(dev, format: "Illegal MTU value %d (> %d) for XDP mode\n" , |
5070 | mtu, (int)MVPP2_MAX_RX_BUF_SIZE); |
5071 | return -EINVAL; |
5072 | } |
5073 | |
5074 | if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { |
5075 | if (priv->percpu_pools) { |
5076 | netdev_warn(dev, format: "mtu %d too high, switching to shared buffers" , mtu); |
5077 | mvpp2_bm_switch_buffers(priv, percpu: false); |
5078 | } |
5079 | } else { |
5080 | bool jumbo = false; |
5081 | int i; |
5082 | |
5083 | for (i = 0; i < priv->port_count; i++) |
5084 | if (priv->port_list[i] != port && |
5085 | MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > |
5086 | MVPP2_BM_LONG_PKT_SIZE) { |
5087 | jumbo = true; |
5088 | break; |
5089 | } |
5090 | |
5091 | /* No port is using jumbo frames */ |
5092 | if (!jumbo) { |
5093 | dev_info(port->dev->dev.parent, |
5094 | "all ports have a low MTU, switching to per-cpu buffers" ); |
5095 | mvpp2_bm_switch_buffers(priv, percpu: true); |
5096 | } |
5097 | } |
5098 | |
5099 | if (running) |
5100 | mvpp2_stop_dev(port); |
5101 | |
5102 | err = mvpp2_bm_update_mtu(dev, mtu); |
5103 | if (err) { |
5104 | netdev_err(dev, format: "failed to change MTU\n" ); |
5105 | /* Reconfigure BM to the original MTU */ |
5106 | mvpp2_bm_update_mtu(dev, mtu: dev->mtu); |
5107 | } else { |
5108 | port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
5109 | } |
5110 | |
5111 | if (running) { |
5112 | mvpp2_start_dev(port); |
5113 | mvpp2_egress_enable(port); |
5114 | mvpp2_ingress_enable(port); |
5115 | } |
5116 | |
5117 | return err; |
5118 | } |
5119 | |
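/* With XDP attached on any port, received buffers may be transmitted back
 * out (XDP_TX/redirect), so the per-CPU page pools must be mapped
 * DMA_BIDIRECTIONAL rather than DMA_FROM_DEVICE. Recreate the pools if
 * their DMA direction no longer matches.
 */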
5120 | static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) |
5121 | { |
5122 | enum dma_data_direction dma_dir = DMA_FROM_DEVICE; |
5123 | struct mvpp2 *priv = port->priv; |
5124 | int err = -1, i; |
5125 | |
5126 | if (!priv->percpu_pools) |
5127 | return err; |
5128 | |
5129 | if (!priv->page_pool[0]) |
5130 | return -ENOMEM; |
5131 | |
5132 | for (i = 0; i < priv->port_count; i++) { |
5133 | port = priv->port_list[i]; |
5134 | if (port->xdp_prog) { |
5135 | dma_dir = DMA_BIDIRECTIONAL; |
5136 | break; |
5137 | } |
5138 | } |
5139 | |
5140 | /* All pools are equal in terms of DMA direction */ |
5141 | if (priv->page_pool[0]->p.dma_dir != dma_dir) |
5142 | err = mvpp2_bm_switch_buffers(priv, percpu: true); |
5143 | |
5144 | return err; |
5145 | } |
5146 | |
5147 | static void |
5148 | mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
5149 | { |
5150 | struct mvpp2_port *port = netdev_priv(dev); |
5151 | unsigned int start; |
5152 | unsigned int cpu; |
5153 | |
5154 | for_each_possible_cpu(cpu) { |
5155 | struct mvpp2_pcpu_stats *cpu_stats; |
5156 | u64 rx_packets; |
5157 | u64 rx_bytes; |
5158 | u64 tx_packets; |
5159 | u64 tx_bytes; |
5160 | |
5161 | cpu_stats = per_cpu_ptr(port->stats, cpu); |
5162 | do { |
5163 | start = u64_stats_fetch_begin(syncp: &cpu_stats->syncp); |
5164 | rx_packets = cpu_stats->rx_packets; |
5165 | rx_bytes = cpu_stats->rx_bytes; |
5166 | tx_packets = cpu_stats->tx_packets; |
5167 | tx_bytes = cpu_stats->tx_bytes; |
5168 | } while (u64_stats_fetch_retry(syncp: &cpu_stats->syncp, start)); |
5169 | |
5170 | stats->rx_packets += rx_packets; |
5171 | stats->rx_bytes += rx_bytes; |
5172 | stats->tx_packets += tx_packets; |
5173 | stats->tx_bytes += tx_bytes; |
5174 | } |
5175 | |
5176 | stats->rx_errors = dev->stats.rx_errors; |
5177 | stats->rx_dropped = dev->stats.rx_dropped; |
5178 | stats->tx_dropped = dev->stats.tx_dropped; |
5179 | } |
5180 | |
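/* SIOCSHWTSTAMP handler: program the per-GOP PTP block. The TSU is enabled
 * whenever Tx or Rx timestamping is requested and the Tx/Rx resets are
 * released accordingly; any requested Rx filter is reported back as
 * HWTSTAMP_FILTER_ALL, the only granularity supported here.
 */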
5181 | static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) |
5182 | { |
5183 | struct hwtstamp_config config; |
5184 | void __iomem *ptp; |
5185 | u32 gcr, int_mask; |
5186 | |
5187 | if (copy_from_user(to: &config, from: ifr->ifr_data, n: sizeof(config))) |
5188 | return -EFAULT; |
5189 | |
5190 | if (config.tx_type != HWTSTAMP_TX_OFF && |
5191 | config.tx_type != HWTSTAMP_TX_ON) |
5192 | return -ERANGE; |
5193 | |
5194 | ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); |
5195 | |
5196 | int_mask = gcr = 0; |
5197 | if (config.tx_type != HWTSTAMP_TX_OFF) { |
5198 | gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; |
5199 | int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | |
5200 | MVPP22_PTP_INT_MASK_QUEUE0; |
5201 | } |
5202 | |
5203 | /* It seems we must also release the TX reset when enabling the TSU */ |
5204 | if (config.rx_filter != HWTSTAMP_FILTER_NONE) |
5205 | gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | |
5206 | MVPP22_PTP_GCR_TX_RESET; |
5207 | |
5208 | if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) |
5209 | mvpp22_tai_start(tai: port->priv->tai); |
5210 | |
5211 | if (config.rx_filter != HWTSTAMP_FILTER_NONE) { |
5212 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
5213 | mvpp2_modify(ptr: ptp + MVPP22_PTP_GCR, |
5214 | MVPP22_PTP_GCR_RX_RESET | |
5215 | MVPP22_PTP_GCR_TX_RESET | |
5216 | MVPP22_PTP_GCR_TSU_ENABLE, set: gcr); |
5217 | port->rx_hwtstamp = true; |
5218 | } else { |
5219 | port->rx_hwtstamp = false; |
5220 | mvpp2_modify(ptr: ptp + MVPP22_PTP_GCR, |
5221 | MVPP22_PTP_GCR_RX_RESET | |
5222 | MVPP22_PTP_GCR_TX_RESET | |
5223 | MVPP22_PTP_GCR_TSU_ENABLE, set: gcr); |
5224 | } |
5225 | |
5226 | mvpp2_modify(ptr: ptp + MVPP22_PTP_INT_MASK, |
5227 | MVPP22_PTP_INT_MASK_QUEUE1 | |
5228 | MVPP22_PTP_INT_MASK_QUEUE0, set: int_mask); |
5229 | |
5230 | if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) |
5231 | mvpp22_tai_stop(tai: port->priv->tai); |
5232 | |
5233 | port->tx_hwtstamp_type = config.tx_type; |
5234 | |
5235 | if (copy_to_user(to: ifr->ifr_data, from: &config, n: sizeof(config))) |
5236 | return -EFAULT; |
5237 | |
5238 | return 0; |
5239 | } |
5240 | |
5241 | static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) |
5242 | { |
5243 | struct hwtstamp_config config; |
5244 | |
5245 | memset(&config, 0, sizeof(config)); |
5246 | |
5247 | config.tx_type = port->tx_hwtstamp_type; |
5248 | config.rx_filter = port->rx_hwtstamp ? |
5249 | HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; |
5250 | |
5251 | if (copy_to_user(to: ifr->ifr_data, from: &config, n: sizeof(config))) |
5252 | return -EFAULT; |
5253 | |
5254 | return 0; |
5255 | } |
5256 | |
5257 | static int mvpp2_ethtool_get_ts_info(struct net_device *dev, |
5258 | struct ethtool_ts_info *info) |
5259 | { |
5260 | struct mvpp2_port *port = netdev_priv(dev); |
5261 | |
5262 | if (!port->hwtstamp) |
5263 | return -EOPNOTSUPP; |
5264 | |
5265 | info->phc_index = mvpp22_tai_ptp_clock_index(tai: port->priv->tai); |
5266 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
5267 | SOF_TIMESTAMPING_RX_SOFTWARE | |
5268 | SOF_TIMESTAMPING_SOFTWARE | |
5269 | SOF_TIMESTAMPING_TX_HARDWARE | |
5270 | SOF_TIMESTAMPING_RX_HARDWARE | |
5271 | SOF_TIMESTAMPING_RAW_HARDWARE; |
5272 | info->tx_types = BIT(HWTSTAMP_TX_OFF) | |
5273 | BIT(HWTSTAMP_TX_ON); |
5274 | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | |
5275 | BIT(HWTSTAMP_FILTER_ALL); |
5276 | |
5277 | return 0; |
5278 | } |
5279 | |
5280 | static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
5281 | { |
5282 | struct mvpp2_port *port = netdev_priv(dev); |
5283 | |
5284 | switch (cmd) { |
5285 | case SIOCSHWTSTAMP: |
5286 | if (port->hwtstamp) |
5287 | return mvpp2_set_ts_config(port, ifr); |
5288 | break; |
5289 | |
5290 | case SIOCGHWTSTAMP: |
5291 | if (port->hwtstamp) |
5292 | return mvpp2_get_ts_config(port, ifr); |
5293 | break; |
5294 | } |
5295 | |
5296 | if (!port->phylink) |
5297 | return -ENOTSUPP; |
5298 | |
5299 | return phylink_mii_ioctl(port->phylink, ifr, cmd); |
5300 | } |
5301 | |
5302 | static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
5303 | { |
5304 | struct mvpp2_port *port = netdev_priv(dev); |
5305 | int ret; |
5306 | |
5307 | ret = mvpp2_prs_vid_entry_add(port, vid); |
5308 | if (ret) |
5309 | netdev_err(dev, format: "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n" , |
5310 | MVPP2_PRS_VLAN_FILT_MAX - 1); |
5311 | return ret; |
5312 | } |
5313 | |
5314 | static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
5315 | { |
5316 | struct mvpp2_port *port = netdev_priv(dev); |
5317 | |
5318 | mvpp2_prs_vid_entry_remove(port, vid); |
5319 | return 0; |
5320 | } |
5321 | |
5322 | static int mvpp2_set_features(struct net_device *dev, |
5323 | netdev_features_t features) |
5324 | { |
5325 | netdev_features_t changed = dev->features ^ features; |
5326 | struct mvpp2_port *port = netdev_priv(dev); |
5327 | |
5328 | if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { |
5329 | if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { |
5330 | mvpp2_prs_vid_enable_filtering(port); |
5331 | } else { |
5332 | /* Invalidate all registered VID filters for this |
5333 | * port |
5334 | */ |
5335 | mvpp2_prs_vid_remove_all(port); |
5336 | |
5337 | mvpp2_prs_vid_disable_filtering(port); |
5338 | } |
5339 | } |
5340 | |
5341 | if (changed & NETIF_F_RXHASH) { |
5342 | if (features & NETIF_F_RXHASH) |
5343 | mvpp22_port_rss_enable(port); |
5344 | else |
5345 | mvpp22_port_rss_disable(port); |
5346 | } |
5347 | |
5348 | return 0; |
5349 | } |
5350 | |
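/* Attach or detach an XDP program. XDP requires per-CPU buffer pools, an
 * MTU that fits in a single Rx buffer, and two Tx queues per CPU for
 * XDP_TX/ndo_xdp_xmit traffic.
 */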
5351 | static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) |
5352 | { |
5353 | struct bpf_prog *prog = bpf->prog, *old_prog; |
5354 | bool running = netif_running(dev: port->dev); |
5355 | bool reset = !prog != !port->xdp_prog; |
5356 | |
5357 | if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { |
5358 | NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP" ); |
5359 | return -EOPNOTSUPP; |
5360 | } |
5361 | |
5362 | if (!port->priv->percpu_pools) { |
5363 | NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP" ); |
5364 | return -EOPNOTSUPP; |
5365 | } |
5366 | |
5367 | if (port->ntxqs < num_possible_cpus() * 2) { |
5368 | NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU" ); |
5369 | return -EOPNOTSUPP; |
5370 | } |
5371 | |
5372 | /* Device is up and a BPF program is being added or removed: the Rx queues must be set up again */ |
5373 | if (running && reset) |
5374 | mvpp2_stop(dev: port->dev); |
5375 | |
5376 | old_prog = xchg(&port->xdp_prog, prog); |
5377 | if (old_prog) |
5378 | bpf_prog_put(prog: old_prog); |
5379 | |
5380 | /* The BPF program was simply replaced; the RXQs and MTU are already set up */ |
5381 | if (!reset) |
5382 | return 0; |
5383 | |
5384 | /* device was up, restore the link */ |
5385 | if (running) |
5386 | mvpp2_open(dev: port->dev); |
5387 | |
5388 | /* Check Page Pool DMA Direction */ |
5389 | mvpp2_check_pagepool_dma(port); |
5390 | |
5391 | return 0; |
5392 | } |
5393 | |
5394 | static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
5395 | { |
5396 | struct mvpp2_port *port = netdev_priv(dev); |
5397 | |
5398 | switch (xdp->command) { |
5399 | case XDP_SETUP_PROG: |
5400 | return mvpp2_xdp_setup(port, bpf: xdp); |
5401 | default: |
5402 | return -EINVAL; |
5403 | } |
5404 | } |
5405 | |
5406 | /* Ethtool methods */ |
5407 | |
5408 | static int mvpp2_ethtool_nway_reset(struct net_device *dev) |
5409 | { |
5410 | struct mvpp2_port *port = netdev_priv(dev); |
5411 | |
5412 | if (!port->phylink) |
5413 | return -ENOTSUPP; |
5414 | |
5415 | return phylink_ethtool_nway_reset(port->phylink); |
5416 | } |
5417 | |
5418 | /* Set interrupt coalescing for ethtool */ |
5419 | static int |
5420 | mvpp2_ethtool_set_coalesce(struct net_device *dev, |
5421 | struct ethtool_coalesce *c, |
5422 | struct kernel_ethtool_coalesce *kernel_coal, |
5423 | struct netlink_ext_ack *extack) |
5424 | { |
5425 | struct mvpp2_port *port = netdev_priv(dev); |
5426 | int queue; |
5427 | |
5428 | for (queue = 0; queue < port->nrxqs; queue++) { |
5429 | struct mvpp2_rx_queue *rxq = port->rxqs[queue]; |
5430 | |
5431 | rxq->time_coal = c->rx_coalesce_usecs; |
5432 | rxq->pkts_coal = c->rx_max_coalesced_frames; |
5433 | mvpp2_rx_pkts_coal_set(port, rxq); |
5434 | mvpp2_rx_time_coal_set(port, rxq); |
5435 | } |
5436 | |
5437 | if (port->has_tx_irqs) { |
5438 | port->tx_time_coal = c->tx_coalesce_usecs; |
5439 | mvpp2_tx_time_coal_set(port); |
5440 | } |
5441 | |
5442 | for (queue = 0; queue < port->ntxqs; queue++) { |
5443 | struct mvpp2_tx_queue *txq = port->txqs[queue]; |
5444 | |
5445 | txq->done_pkts_coal = c->tx_max_coalesced_frames; |
5446 | |
5447 | if (port->has_tx_irqs) |
5448 | mvpp2_tx_pkts_coal_set(port, txq); |
5449 | } |
5450 | |
5451 | return 0; |
5452 | } |
5453 | |
5454 | /* Get interrupt coalescing for ethtool */ |
5455 | static int |
5456 | mvpp2_ethtool_get_coalesce(struct net_device *dev, |
5457 | struct ethtool_coalesce *c, |
5458 | struct kernel_ethtool_coalesce *kernel_coal, |
5459 | struct netlink_ext_ack *extack) |
5460 | { |
5461 | struct mvpp2_port *port = netdev_priv(dev); |
5462 | |
5463 | c->rx_coalesce_usecs = port->rxqs[0]->time_coal; |
5464 | c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; |
5465 | c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; |
5466 | c->tx_coalesce_usecs = port->tx_time_coal; |
5467 | return 0; |
5468 | } |
5469 | |
5470 | static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, |
5471 | struct ethtool_drvinfo *drvinfo) |
5472 | { |
5473 | strscpy(drvinfo->driver, MVPP2_DRIVER_NAME, |
5474 | sizeof(drvinfo->driver)); |
5475 | strscpy(drvinfo->version, MVPP2_DRIVER_VERSION, |
5476 | sizeof(drvinfo->version)); |
5477 | strscpy(drvinfo->bus_info, dev_name(&dev->dev), |
5478 | sizeof(drvinfo->bus_info)); |
5479 | } |
5480 | |
5481 | static void |
5482 | mvpp2_ethtool_get_ringparam(struct net_device *dev, |
5483 | struct ethtool_ringparam *ring, |
5484 | struct kernel_ethtool_ringparam *kernel_ring, |
5485 | struct netlink_ext_ack *extack) |
5486 | { |
5487 | struct mvpp2_port *port = netdev_priv(dev); |
5488 | |
5489 | ring->rx_max_pending = MVPP2_MAX_RXD_MAX; |
5490 | ring->tx_max_pending = MVPP2_MAX_TXD_MAX; |
5491 | ring->rx_pending = port->rx_ring_size; |
5492 | ring->tx_pending = port->tx_ring_size; |
5493 | } |
5494 | |
5495 | static int |
5496 | mvpp2_ethtool_set_ringparam(struct net_device *dev, |
5497 | struct ethtool_ringparam *ring, |
5498 | struct kernel_ethtool_ringparam *kernel_ring, |
5499 | struct netlink_ext_ack *extack) |
5500 | { |
5501 | struct mvpp2_port *port = netdev_priv(dev); |
5502 | u16 prev_rx_ring_size = port->rx_ring_size; |
5503 | u16 prev_tx_ring_size = port->tx_ring_size; |
5504 | int err; |
5505 | |
5506 | err = mvpp2_check_ringparam_valid(dev, ring); |
5507 | if (err) |
5508 | return err; |
5509 | |
5510 | if (!netif_running(dev)) { |
5511 | port->rx_ring_size = ring->rx_pending; |
5512 | port->tx_ring_size = ring->tx_pending; |
5513 | return 0; |
5514 | } |
5515 | |
5516 | /* The interface is running, so we have to force a |
5517 | * reallocation of the queues |
5518 | */ |
5519 | mvpp2_stop_dev(port); |
5520 | mvpp2_cleanup_rxqs(port); |
5521 | mvpp2_cleanup_txqs(port); |
5522 | |
5523 | port->rx_ring_size = ring->rx_pending; |
5524 | port->tx_ring_size = ring->tx_pending; |
5525 | |
5526 | err = mvpp2_setup_rxqs(port); |
5527 | if (err) { |
5528 | /* Reallocate Rx queues with the original ring size */ |
5529 | port->rx_ring_size = prev_rx_ring_size; |
5530 | ring->rx_pending = prev_rx_ring_size; |
5531 | err = mvpp2_setup_rxqs(port); |
5532 | if (err) |
5533 | goto err_out; |
5534 | } |
5535 | err = mvpp2_setup_txqs(port); |
5536 | if (err) { |
5537 | /* Reallocate Tx queues with the original ring size */ |
5538 | port->tx_ring_size = prev_tx_ring_size; |
5539 | ring->tx_pending = prev_tx_ring_size; |
5540 | err = mvpp2_setup_txqs(port); |
5541 | if (err) |
5542 | goto err_clean_rxqs; |
5543 | } |
5544 | |
5545 | mvpp2_start_dev(port); |
5546 | mvpp2_egress_enable(port); |
5547 | mvpp2_ingress_enable(port); |
5548 | |
5549 | return 0; |
5550 | |
5551 | err_clean_rxqs: |
5552 | mvpp2_cleanup_rxqs(port); |
5553 | err_out: |
5554 | netdev_err(dev, format: "failed to change ring parameters" ); |
5555 | return err; |
5556 | } |
5557 | |
5558 | static void mvpp2_ethtool_get_pause_param(struct net_device *dev, |
5559 | struct ethtool_pauseparam *pause) |
5560 | { |
5561 | struct mvpp2_port *port = netdev_priv(dev); |
5562 | |
5563 | if (!port->phylink) |
5564 | return; |
5565 | |
5566 | phylink_ethtool_get_pauseparam(port->phylink, pause); |
5567 | } |
5568 | |
5569 | static int mvpp2_ethtool_set_pause_param(struct net_device *dev, |
5570 | struct ethtool_pauseparam *pause) |
5571 | { |
5572 | struct mvpp2_port *port = netdev_priv(dev); |
5573 | |
5574 | if (!port->phylink) |
5575 | return -ENOTSUPP; |
5576 | |
5577 | return phylink_ethtool_set_pauseparam(port->phylink, pause); |
5578 | } |
5579 | |
5580 | static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, |
5581 | struct ethtool_link_ksettings *cmd) |
5582 | { |
5583 | struct mvpp2_port *port = netdev_priv(dev); |
5584 | |
5585 | if (!port->phylink) |
5586 | return -ENOTSUPP; |
5587 | |
5588 | return phylink_ethtool_ksettings_get(port->phylink, cmd); |
5589 | } |
5590 | |
5591 | static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, |
5592 | const struct ethtool_link_ksettings *cmd) |
5593 | { |
5594 | struct mvpp2_port *port = netdev_priv(dev); |
5595 | |
5596 | if (!port->phylink) |
5597 | return -ENOTSUPP; |
5598 | |
5599 | return phylink_ethtool_ksettings_set(port->phylink, cmd); |
5600 | } |
5601 | |
5602 | static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, |
5603 | struct ethtool_rxnfc *info, u32 *rules) |
5604 | { |
5605 | struct mvpp2_port *port = netdev_priv(dev); |
5606 | int ret = 0, i, loc = 0; |
5607 | |
5608 | if (!mvpp22_rss_is_supported(port)) |
5609 | return -EOPNOTSUPP; |
5610 | |
5611 | switch (info->cmd) { |
5612 | case ETHTOOL_GRXFH: |
5613 | ret = mvpp2_ethtool_rxfh_get(port, info); |
5614 | break; |
5615 | case ETHTOOL_GRXRINGS: |
5616 | info->data = port->nrxqs; |
5617 | break; |
5618 | case ETHTOOL_GRXCLSRLCNT: |
5619 | info->rule_cnt = port->n_rfs_rules; |
5620 | break; |
5621 | case ETHTOOL_GRXCLSRULE: |
5622 | ret = mvpp2_ethtool_cls_rule_get(port, rxnfc: info); |
5623 | break; |
5624 | case ETHTOOL_GRXCLSRLALL: |
5625 | for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { |
5626 | if (loc == info->rule_cnt) { |
5627 | ret = -EMSGSIZE; |
5628 | break; |
5629 | } |
5630 | |
5631 | if (port->rfs_rules[i]) |
5632 | rules[loc++] = i; |
5633 | } |
5634 | break; |
5635 | default: |
5636 | return -ENOTSUPP; |
5637 | } |
5638 | |
5639 | return ret; |
5640 | } |
5641 | |
5642 | static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, |
5643 | struct ethtool_rxnfc *info) |
5644 | { |
5645 | struct mvpp2_port *port = netdev_priv(dev); |
5646 | int ret = 0; |
5647 | |
5648 | if (!mvpp22_rss_is_supported(port)) |
5649 | return -EOPNOTSUPP; |
5650 | |
5651 | switch (info->cmd) { |
5652 | case ETHTOOL_SRXFH: |
5653 | ret = mvpp2_ethtool_rxfh_set(port, info); |
5654 | break; |
5655 | case ETHTOOL_SRXCLSRLINS: |
5656 | ret = mvpp2_ethtool_cls_rule_ins(port, info); |
5657 | break; |
5658 | case ETHTOOL_SRXCLSRLDEL: |
5659 | ret = mvpp2_ethtool_cls_rule_del(port, info); |
5660 | break; |
5661 | default: |
5662 | return -EOPNOTSUPP; |
5663 | } |
5664 | return ret; |
5665 | } |
5666 | |
5667 | static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) |
5668 | { |
5669 | struct mvpp2_port *port = netdev_priv(dev); |
5670 | |
5671 | return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0; |
5672 | } |
5673 | |
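/* Report the RSS configuration of the given context. The hash function is
 * fixed to CRC32 in hardware; only the indirection table is readable.
 */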
5674 | static int mvpp2_ethtool_get_rxfh(struct net_device *dev, |
5675 | struct ethtool_rxfh_param *rxfh) |
5676 | { |
5677 | struct mvpp2_port *port = netdev_priv(dev); |
5678 | u32 rss_context = rxfh->rss_context; |
5679 | int ret = 0; |
5680 | |
5681 | if (!mvpp22_rss_is_supported(port)) |
5682 | return -EOPNOTSUPP; |
5683 | if (rss_context >= MVPP22_N_RSS_TABLES) |
5684 | return -EINVAL; |
5685 | |
5686 | rxfh->hfunc = ETH_RSS_HASH_CRC32; |
5687 | |
5688 | if (rxfh->indir) |
5689 | ret = mvpp22_port_rss_ctx_indir_get(port, rss_ctx: rss_context, |
5690 | indir: rxfh->indir); |
5691 | |
5692 | return ret; |
5693 | } |
5694 | |
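/* Update the RSS configuration of the given context: create it on
 * ETH_RXFH_CONTEXT_ALLOC, delete it on request, and program the
 * indirection table. Only the CRC32 hash is supported and the hash key is
 * not configurable.
 */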
5695 | static int mvpp2_ethtool_set_rxfh(struct net_device *dev, |
5696 | struct ethtool_rxfh_param *rxfh, |
5697 | struct netlink_ext_ack *extack) |
5698 | { |
5699 | struct mvpp2_port *port = netdev_priv(dev); |
5700 | u32 *rss_context = &rxfh->rss_context; |
5701 | int ret = 0; |
5702 | |
5703 | if (!mvpp22_rss_is_supported(port)) |
5704 | return -EOPNOTSUPP; |
5705 | |
5706 | if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && |
5707 | rxfh->hfunc != ETH_RSS_HASH_CRC32) |
5708 | return -EOPNOTSUPP; |
5709 | |
5710 | if (rxfh->key) |
5711 | return -EOPNOTSUPP; |
5712 | |
5713 | if (*rss_context && rxfh->rss_delete) |
5714 | return mvpp22_port_rss_ctx_delete(port, rss_ctx: *rss_context); |
5715 | |
5716 | if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { |
5717 | ret = mvpp22_port_rss_ctx_create(port, rss_ctx: rss_context); |
5718 | if (ret) |
5719 | return ret; |
5720 | } |
5721 | |
5722 | if (rxfh->indir) |
5723 | ret = mvpp22_port_rss_ctx_indir_set(port, rss_ctx: *rss_context, |
5724 | indir: rxfh->indir); |
5725 | |
5726 | return ret; |
5727 | } |
5728 | |
5729 | /* Device ops */ |
5730 | |
5731 | static const struct net_device_ops mvpp2_netdev_ops = { |
5732 | .ndo_open = mvpp2_open, |
5733 | .ndo_stop = mvpp2_stop, |
5734 | .ndo_start_xmit = mvpp2_tx, |
5735 | .ndo_set_rx_mode = mvpp2_set_rx_mode, |
5736 | .ndo_set_mac_address = mvpp2_set_mac_address, |
5737 | .ndo_change_mtu = mvpp2_change_mtu, |
5738 | .ndo_get_stats64 = mvpp2_get_stats64, |
5739 | .ndo_eth_ioctl = mvpp2_ioctl, |
5740 | .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, |
5741 | .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, |
5742 | .ndo_set_features = mvpp2_set_features, |
5743 | .ndo_bpf = mvpp2_xdp, |
5744 | .ndo_xdp_xmit = mvpp2_xdp_xmit, |
5745 | }; |
5746 | |
5747 | static const struct ethtool_ops mvpp2_eth_tool_ops = { |
5748 | .cap_rss_ctx_supported = true, |
5749 | .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
5750 | ETHTOOL_COALESCE_MAX_FRAMES, |
5751 | .nway_reset = mvpp2_ethtool_nway_reset, |
5752 | .get_link = ethtool_op_get_link, |
5753 | .get_ts_info = mvpp2_ethtool_get_ts_info, |
5754 | .set_coalesce = mvpp2_ethtool_set_coalesce, |
5755 | .get_coalesce = mvpp2_ethtool_get_coalesce, |
5756 | .get_drvinfo = mvpp2_ethtool_get_drvinfo, |
5757 | .get_ringparam = mvpp2_ethtool_get_ringparam, |
5758 | .set_ringparam = mvpp2_ethtool_set_ringparam, |
5759 | .get_strings = mvpp2_ethtool_get_strings, |
5760 | .get_ethtool_stats = mvpp2_ethtool_get_stats, |
5761 | .get_sset_count = mvpp2_ethtool_get_sset_count, |
5762 | .get_pauseparam = mvpp2_ethtool_get_pause_param, |
5763 | .set_pauseparam = mvpp2_ethtool_set_pause_param, |
5764 | .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, |
5765 | .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, |
5766 | .get_rxnfc = mvpp2_ethtool_get_rxnfc, |
5767 | .set_rxnfc = mvpp2_ethtool_set_rxnfc, |
5768 | .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, |
5769 | .get_rxfh = mvpp2_ethtool_get_rxfh, |
5770 | .set_rxfh = mvpp2_ethtool_set_rxfh, |
5771 | }; |
5772 | |
5773 | /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that |
5774 | * had a single IRQ defined per-port. |
5775 | */ |
5776 | static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, |
5777 | struct device_node *port_node) |
5778 | { |
5779 | struct mvpp2_queue_vector *v = &port->qvecs[0]; |
5780 | |
5781 | v->first_rxq = 0; |
5782 | v->nrxqs = port->nrxqs; |
5783 | v->type = MVPP2_QUEUE_VECTOR_SHARED; |
5784 | v->sw_thread_id = 0; |
5785 | v->sw_thread_mask = *cpumask_bits(cpu_online_mask); |
5786 | v->port = port; |
5787 | v->irq = irq_of_parse_and_map(node: port_node, index: 0); |
5788 | if (v->irq <= 0) |
5789 | return -EINVAL; |
5790 | netif_napi_add(dev: port->dev, napi: &v->napi, poll: mvpp2_poll); |
5791 | |
5792 | port->nqvecs = 1; |
5793 | |
5794 | return 0; |
5795 | } |
5796 | |
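/* One queue vector per software thread. In multi queue mode each private
 * vector owns a single Rx queue; in single queue mode an additional shared
 * vector handles all Rx queues, while the private vectors (named "tx-cpuX"
 * or "hifX") only service Tx.
 */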
5797 | static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, |
5798 | struct device_node *port_node) |
5799 | { |
5800 | struct mvpp2 *priv = port->priv; |
5801 | struct mvpp2_queue_vector *v; |
5802 | int i, ret; |
5803 | |
5804 | switch (queue_mode) { |
5805 | case MVPP2_QDIST_SINGLE_MODE: |
5806 | port->nqvecs = priv->nthreads + 1; |
5807 | break; |
5808 | case MVPP2_QDIST_MULTI_MODE: |
5809 | port->nqvecs = priv->nthreads; |
5810 | break; |
5811 | } |
5812 | |
5813 | for (i = 0; i < port->nqvecs; i++) { |
5814 | char irqname[16]; |
5815 | |
5816 | v = port->qvecs + i; |
5817 | |
5818 | v->port = port; |
5819 | v->type = MVPP2_QUEUE_VECTOR_PRIVATE; |
5820 | v->sw_thread_id = i; |
5821 | v->sw_thread_mask = BIT(i); |
5822 | |
5823 | if (port->flags & MVPP2_F_DT_COMPAT) |
5824 | snprintf(buf: irqname, size: sizeof(irqname), fmt: "tx-cpu%d" , i); |
5825 | else |
5826 | snprintf(buf: irqname, size: sizeof(irqname), fmt: "hif%d" , i); |
5827 | |
5828 | if (queue_mode == MVPP2_QDIST_MULTI_MODE) { |
5829 | v->first_rxq = i; |
5830 | v->nrxqs = 1; |
5831 | } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && |
5832 | i == (port->nqvecs - 1)) { |
5833 | v->first_rxq = 0; |
5834 | v->nrxqs = port->nrxqs; |
5835 | v->type = MVPP2_QUEUE_VECTOR_SHARED; |
5836 | |
5837 | if (port->flags & MVPP2_F_DT_COMPAT) |
5838 | strscpy(irqname, "rx-shared" , sizeof(irqname)); |
5839 | } |
5840 | |
5841 | if (port_node) |
5842 | v->irq = of_irq_get_byname(dev: port_node, name: irqname); |
5843 | else |
5844 | v->irq = fwnode_irq_get(fwnode: port->fwnode, index: i); |
5845 | if (v->irq <= 0) { |
5846 | ret = -EINVAL; |
5847 | goto err; |
5848 | } |
5849 | |
5850 | netif_napi_add(dev: port->dev, napi: &v->napi, poll: mvpp2_poll); |
5851 | } |
5852 | |
5853 | return 0; |
5854 | |
5855 | err: |
5856 | for (i = 0; i < port->nqvecs; i++) |
5857 | irq_dispose_mapping(virq: port->qvecs[i].irq); |
5858 | return ret; |
5859 | } |
5860 | |
5861 | static int mvpp2_queue_vectors_init(struct mvpp2_port *port, |
5862 | struct device_node *port_node) |
5863 | { |
5864 | if (port->has_tx_irqs) |
5865 | return mvpp2_multi_queue_vectors_init(port, port_node); |
5866 | else |
5867 | return mvpp2_simple_queue_vectors_init(port, port_node); |
5868 | } |
5869 | |
5870 | static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) |
5871 | { |
5872 | int i; |
5873 | |
5874 | for (i = 0; i < port->nqvecs; i++) |
5875 | irq_dispose_mapping(virq: port->qvecs[i].irq); |
5876 | } |
5877 | |
5878 | /* Configure Rx queue group interrupt for this port */ |
5879 | static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) |
5880 | { |
5881 | struct mvpp2 *priv = port->priv; |
5882 | u32 val; |
5883 | int i; |
5884 | |
5885 | if (priv->hw_version == MVPP21) { |
5886 | mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), |
5887 | data: port->nrxqs); |
5888 | return; |
5889 | } |
5890 | |
5891 | /* Handle the more complicated PPv2.2 and PPv2.3 case */ |
5892 | for (i = 0; i < port->nqvecs; i++) { |
5893 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
5894 | |
5895 | if (!qv->nrxqs) |
5896 | continue; |
5897 | |
5898 | val = qv->sw_thread_id; |
5899 | val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; |
5900 | mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, data: val); |
5901 | |
5902 | val = qv->first_rxq; |
5903 | val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; |
5904 | mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, data: val); |
5905 | } |
5906 | } |
5907 | |
5908 | /* Initialize port HW */ |
5909 | static int mvpp2_port_init(struct mvpp2_port *port) |
5910 | { |
5911 | struct device *dev = port->dev->dev.parent; |
5912 | struct mvpp2 *priv = port->priv; |
5913 | struct mvpp2_txq_pcpu *txq_pcpu; |
5914 | unsigned int thread; |
5915 | int queue, err, val; |
5916 | |
5917 | /* Checks for hardware constraints */ |
5918 | if (port->first_rxq + port->nrxqs > |
5919 | MVPP2_MAX_PORTS * priv->max_port_rxqs) |
5920 | return -EINVAL; |
5921 | |
5922 | if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) |
5923 | return -EINVAL; |
5924 | |
5925 | /* Disable port */ |
5926 | mvpp2_egress_disable(port); |
5927 | mvpp2_port_disable(port); |
5928 | |
5929 | if (mvpp2_is_xlg(interface: port->phy_interface)) { |
5930 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG); |
5931 | val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
5932 | val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; |
5933 | writel(val, addr: port->base + MVPP22_XLG_CTRL0_REG); |
5934 | } else { |
5935 | val = readl(addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
5936 | val &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
5937 | val |= MVPP2_GMAC_FORCE_LINK_DOWN; |
5938 | writel(val, addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
5939 | } |
5940 | |
5941 | port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; |
5942 | |
5943 | port->txqs = devm_kcalloc(dev, n: port->ntxqs, size: sizeof(*port->txqs), |
5944 | GFP_KERNEL); |
5945 | if (!port->txqs) |
5946 | return -ENOMEM; |
5947 | |
5948 | /* Associate physical Tx queues to this port and initialize. |
5949 | * The mapping is predefined. |
5950 | */ |
5951 | for (queue = 0; queue < port->ntxqs; queue++) { |
5952 | int queue_phy_id = mvpp2_txq_phys(port: port->id, txq: queue); |
5953 | struct mvpp2_tx_queue *txq; |
5954 | |
5955 | txq = devm_kzalloc(dev, size: sizeof(*txq), GFP_KERNEL); |
5956 | if (!txq) { |
5957 | err = -ENOMEM; |
5958 | goto err_free_percpu; |
5959 | } |
5960 | |
5961 | txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); |
5962 | if (!txq->pcpu) { |
5963 | err = -ENOMEM; |
5964 | goto err_free_percpu; |
5965 | } |
5966 | |
5967 | txq->id = queue_phy_id; |
5968 | txq->log_id = queue; |
5969 | txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; |
5970 | for (thread = 0; thread < priv->nthreads; thread++) { |
5971 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
5972 | txq_pcpu->thread = thread; |
5973 | } |
5974 | |
5975 | port->txqs[queue] = txq; |
5976 | } |
5977 | |
5978 | port->rxqs = devm_kcalloc(dev, n: port->nrxqs, size: sizeof(*port->rxqs), |
5979 | GFP_KERNEL); |
5980 | if (!port->rxqs) { |
5981 | err = -ENOMEM; |
5982 | goto err_free_percpu; |
5983 | } |
5984 | |
5985 | /* Allocate and initialize Rx queue for this port */ |
5986 | for (queue = 0; queue < port->nrxqs; queue++) { |
5987 | struct mvpp2_rx_queue *rxq; |
5988 | |
5989 | /* Map physical Rx queue to port's logical Rx queue */ |
5990 | rxq = devm_kzalloc(dev, size: sizeof(*rxq), GFP_KERNEL); |
5991 | if (!rxq) { |
5992 | err = -ENOMEM; |
5993 | goto err_free_percpu; |
5994 | } |
5995 | /* Map this Rx queue to a physical queue */ |
5996 | rxq->id = port->first_rxq + queue; |
5997 | rxq->port = port->id; |
5998 | rxq->logic_rxq = queue; |
5999 | |
6000 | port->rxqs[queue] = rxq; |
6001 | } |
6002 | |
6003 | mvpp2_rx_irqs_setup(port); |
6004 | |
6005 | /* Create Rx descriptor rings */ |
6006 | for (queue = 0; queue < port->nrxqs; queue++) { |
6007 | struct mvpp2_rx_queue *rxq = port->rxqs[queue]; |
6008 | |
6009 | rxq->size = port->rx_ring_size; |
6010 | rxq->pkts_coal = MVPP2_RX_COAL_PKTS; |
6011 | rxq->time_coal = MVPP2_RX_COAL_USEC; |
6012 | } |
6013 | |
6014 | mvpp2_ingress_disable(port); |
6015 | |
6016 | /* Port default configuration */ |
6017 | mvpp2_defaults_set(port); |
6018 | |
6019 | /* Port's classifier configuration */ |
6020 | mvpp2_cls_oversize_rxq_set(port); |
6021 | mvpp2_cls_port_config(port); |
6022 | |
6023 | if (mvpp22_rss_is_supported(port)) |
6024 | mvpp22_port_rss_init(port); |
6025 | |
6026 | /* Provide an initial Rx packet size */ |
6027 | port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); |
6028 | |
6029 | /* Initialize pools for swf */ |
6030 | err = mvpp2_swf_bm_pool_init(port); |
6031 | if (err) |
6032 | goto err_free_percpu; |
6033 | |
6034 | /* Clear all port stats */ |
6035 | mvpp2_read_stats(port); |
6036 | memset(port->ethtool_stats, 0, |
6037 | MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); |
6038 | |
6039 | return 0; |
6040 | |
6041 | err_free_percpu: |
6042 | for (queue = 0; queue < port->ntxqs; queue++) { |
6043 | if (!port->txqs[queue]) |
6044 | continue; |
6045 | free_percpu(pdata: port->txqs[queue]->pcpu); |
6046 | } |
6047 | return err; |
6048 | } |
6049 | |
6050 | static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, |
6051 | unsigned long *flags) |
6052 | { |
6053 | char *irqs[5] = { "rx-shared" , "tx-cpu0" , "tx-cpu1" , "tx-cpu2" , |
6054 | "tx-cpu3" }; |
6055 | int i; |
6056 | |
6057 | for (i = 0; i < 5; i++) |
6058 | if (of_property_match_string(np: port_node, propname: "interrupt-names" , |
6059 | string: irqs[i]) < 0) |
6060 | return false; |
6061 | |
6062 | *flags |= MVPP2_F_DT_COMPAT; |
6063 | return true; |
6064 | } |
6065 | |
6066 | /* Checks if the port dt description has the required Tx interrupts: |
6067 | * - PPv2.1: there are no such interrupts. |
6068 | * - PPv2.2 and PPv2.3: |
6069 | * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] |
6070 | * - The new ones have: "hifX" with X in [0..8] |
6071 | * |
6072 | * All these variants are supported for backward compatibility. |
6073 | */ |
6074 | static bool mvpp2_port_has_irqs(struct mvpp2 *priv, |
6075 | struct device_node *port_node, |
6076 | unsigned long *flags) |
6077 | { |
6078 | char name[5]; |
6079 | int i; |
6080 | |
6081 | /* ACPI */ |
6082 | if (!port_node) |
6083 | return true; |
6084 | |
6085 | if (priv->hw_version == MVPP21) |
6086 | return false; |
6087 | |
6088 | if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) |
6089 | return true; |
6090 | |
6091 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
6092 | snprintf(buf: name, size: 5, fmt: "hif%d" , i); |
6093 | if (of_property_match_string(np: port_node, propname: "interrupt-names" , |
6094 | string: name) < 0) |
6095 | return false; |
6096 | } |
6097 | |
6098 | return true; |
6099 | } |
6100 | |
6101 | static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, |
6102 | struct fwnode_handle *fwnode, |
6103 | char **mac_from) |
6104 | { |
6105 | struct mvpp2_port *port = netdev_priv(dev); |
6106 | char hw_mac_addr[ETH_ALEN] = {0}; |
6107 | char fw_mac_addr[ETH_ALEN]; |
6108 | int ret; |
6109 | |
6110 | if (!fwnode_get_mac_address(fwnode, addr: fw_mac_addr)) { |
6111 | *mac_from = "firmware node" ; |
6112 | eth_hw_addr_set(dev, addr: fw_mac_addr); |
6113 | return 0; |
6114 | } |
6115 | |
6116 | if (priv->hw_version == MVPP21) { |
6117 | mvpp21_get_mac_address(port, addr: hw_mac_addr); |
6118 | if (is_valid_ether_addr(addr: hw_mac_addr)) { |
6119 | *mac_from = "hardware" ; |
6120 | eth_hw_addr_set(dev, addr: hw_mac_addr); |
6121 | return 0; |
6122 | } |
6123 | } |
6124 | |
6125 | /* Only valid on OF enabled platforms */ |
6126 | ret = of_get_mac_address_nvmem(to_of_node(fwnode), mac: fw_mac_addr); |
6127 | if (ret == -EPROBE_DEFER) |
6128 | return ret; |
6129 | if (!ret) { |
6130 | *mac_from = "nvmem cell" ; |
6131 | eth_hw_addr_set(dev, addr: fw_mac_addr); |
6132 | return 0; |
6133 | } |
6134 | |
6135 | *mac_from = "random" ; |
6136 | eth_hw_addr_random(dev); |
6137 | |
6138 | return 0; |
6139 | } |
6140 | |
6141 | static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) |
6142 | { |
6143 | return container_of(config, struct mvpp2_port, phylink_config); |
6144 | } |
6145 | |
6146 | static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) |
6147 | { |
6148 | return container_of(pcs, struct mvpp2_port, pcs_xlg); |
6149 | } |
6150 | |
6151 | static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) |
6152 | { |
6153 | return container_of(pcs, struct mvpp2_port, pcs_gmac); |
6154 | } |
6155 | |
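/* Decode the XLG MAC status. The speed is implied by the configured
 * interface mode (5GBASE-R or 10GBASE-R) rather than read back; only the
 * link state and pause bits come from the hardware registers.
 */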
6156 | static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, |
6157 | struct phylink_link_state *state) |
6158 | { |
6159 | struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); |
6160 | u32 val; |
6161 | |
6162 | if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) |
6163 | state->speed = SPEED_5000; |
6164 | else |
6165 | state->speed = SPEED_10000; |
6166 | state->duplex = 1; |
6167 | state->an_complete = 1; |
6168 | |
6169 | val = readl(addr: port->base + MVPP22_XLG_STATUS); |
6170 | state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); |
6171 | |
6172 | state->pause = 0; |
6173 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG); |
6174 | if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) |
6175 | state->pause |= MLO_PAUSE_TX; |
6176 | if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) |
6177 | state->pause |= MLO_PAUSE_RX; |
6178 | } |
6179 | |
6180 | static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, |
6181 | phy_interface_t interface, |
6182 | const unsigned long *advertising, |
6183 | bool permit_pause_to_mac) |
6184 | { |
6185 | return 0; |
6186 | } |
6187 | |
6188 | static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { |
6189 | .pcs_get_state = mvpp2_xlg_pcs_get_state, |
6190 | .pcs_config = mvpp2_xlg_pcs_config, |
6191 | }; |
6192 | |
6193 | static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, |
6194 | unsigned long *supported, |
6195 | const struct phylink_link_state *state) |
6196 | { |
6197 | /* When in 802.3z mode, we must have AN enabled: |
6198 | * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... |
6199 | * When <PortType> = 1 (1000BASE-X) this field must be set to 1. |
6200 | */ |
6201 | if (phy_interface_mode_is_8023z(mode: state->interface) && |
6202 | !phylink_test(state->advertising, Autoneg)) |
6203 | return -EINVAL; |
6204 | |
6205 | return 0; |
6206 | } |
6207 | |
6208 | static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, |
6209 | struct phylink_link_state *state) |
6210 | { |
6211 | struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); |
6212 | u32 val; |
6213 | |
6214 | val = readl(addr: port->base + MVPP2_GMAC_STATUS0); |
6215 | |
6216 | state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); |
6217 | state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); |
6218 | state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); |
6219 | |
6220 | switch (port->phy_interface) { |
6221 | case PHY_INTERFACE_MODE_1000BASEX: |
6222 | state->speed = SPEED_1000; |
6223 | break; |
6224 | case PHY_INTERFACE_MODE_2500BASEX: |
6225 | state->speed = SPEED_2500; |
6226 | break; |
6227 | default: |
6228 | if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) |
6229 | state->speed = SPEED_1000; |
6230 | else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) |
6231 | state->speed = SPEED_100; |
6232 | else |
6233 | state->speed = SPEED_10; |
6234 | } |
6235 | |
6236 | state->pause = 0; |
6237 | if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) |
6238 | state->pause |= MLO_PAUSE_RX; |
6239 | if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) |
6240 | state->pause |= MLO_PAUSE_TX; |
6241 | } |
6242 | |
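/* Program the GMAC in-band autonegotiation bits for the requested
 * negotiation mode. A non-zero return value (the pause advertisement
 * changed) tells phylink that autonegotiation needs to be restarted.
 */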
6243 | static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, |
6244 | phy_interface_t interface, |
6245 | const unsigned long *advertising, |
6246 | bool permit_pause_to_mac) |
6247 | { |
6248 | struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); |
6249 | u32 mask, val, an, old_an, changed; |
6250 | |
6251 | mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | |
6252 | MVPP2_GMAC_IN_BAND_AUTONEG | |
6253 | MVPP2_GMAC_AN_SPEED_EN | |
6254 | MVPP2_GMAC_FLOW_CTRL_AUTONEG | |
6255 | MVPP2_GMAC_AN_DUPLEX_EN; |
6256 | |
6257 | if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { |
6258 | mask |= MVPP2_GMAC_CONFIG_MII_SPEED | |
6259 | MVPP2_GMAC_CONFIG_GMII_SPEED | |
6260 | MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
6261 | val = MVPP2_GMAC_IN_BAND_AUTONEG; |
6262 | |
6263 | if (interface == PHY_INTERFACE_MODE_SGMII) { |
6264 | /* SGMII mode receives the speed and duplex from PHY */ |
6265 | val |= MVPP2_GMAC_AN_SPEED_EN | |
6266 | MVPP2_GMAC_AN_DUPLEX_EN; |
6267 | } else { |
6268 | /* 802.3z mode has fixed speed and duplex */ |
6269 | val |= MVPP2_GMAC_CONFIG_GMII_SPEED | |
6270 | MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
6271 | |
6272 | /* The FLOW_CTRL_AUTONEG bit selects whether the hardware |
6273 | * controls the GMAC pause modes automatically, or whether |
6274 | * the bits in MVPP22_GMAC_CTRL_4_REG control them manually. |
6275 | */ |
6276 | if (permit_pause_to_mac) |
6277 | val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; |
6278 | |
6279 | /* Configure advertisement bits */ |
6280 | mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; |
6281 | if (phylink_test(advertising, Pause)) |
6282 | val |= MVPP2_GMAC_FC_ADV_EN; |
6283 | if (phylink_test(advertising, Asym_Pause)) |
6284 | val |= MVPP2_GMAC_FC_ADV_ASM_EN; |
6285 | } |
6286 | } else { |
6287 | val = 0; |
6288 | } |
6289 | |
6290 | old_an = an = readl(addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6291 | an = (an & ~mask) | val; |
6292 | changed = an ^ old_an; |
6293 | if (changed) |
6294 | writel(val: an, addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6295 | |
6296 | /* We are only interested in the advertisement bits changing */ |
6297 | return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); |
6298 | } |
6299 | |
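/* Restart in-band autonegotiation by pulsing the RESTART_AN bit. */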
6300 | static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) |
6301 | { |
6302 | struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); |
6303 | u32 val = readl(addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6304 | |
6305 | writel(val: val | MVPP2_GMAC_IN_BAND_RESTART_AN, |
6306 | addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6307 | writel(val: val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, |
6308 | addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6309 | } |
6310 | |
6311 | static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { |
6312 | .pcs_validate = mvpp2_gmac_pcs_validate, |
6313 | .pcs_get_state = mvpp2_gmac_pcs_get_state, |
6314 | .pcs_config = mvpp2_gmac_pcs_config, |
6315 | .pcs_an_restart = mvpp2_gmac_pcs_an_restart, |
6316 | }; |
6317 | |
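/* Take the XLG MAC out of reset and enable forwarding of flow control and
 * PFC frames to the MAC; the reset release is then polled until it latches.
 */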
6318 | static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, |
6319 | const struct phylink_link_state *state) |
6320 | { |
6321 | u32 val; |
6322 | |
6323 | mvpp2_modify(ptr: port->base + MVPP22_XLG_CTRL0_REG, |
6324 | MVPP22_XLG_CTRL0_MAC_RESET_DIS, |
6325 | MVPP22_XLG_CTRL0_MAC_RESET_DIS); |
6326 | mvpp2_modify(ptr: port->base + MVPP22_XLG_CTRL4_REG, |
6327 | MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | |
6328 | MVPP22_XLG_CTRL4_EN_IDLE_CHECK | |
6329 | MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, |
6330 | MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); |
6331 | |
6332 | /* Wait for reset to deassert */ |
6333 | do { |
6334 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG); |
6335 | } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); |
6336 | } |
6337 | |
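/* Set up the GMAC port type, PCS and clocking for RGMII, SGMII or 802.3z
 * interfaces, then pick the negotiation style: nothing is forced for
 * PHY/fixed-link modes, while 1000/2500BASE-X use the fixed-speed port
 * type setting.
 */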
6338 | static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, |
6339 | const struct phylink_link_state *state) |
6340 | { |
6341 | u32 old_ctrl0, ctrl0; |
6342 | u32 old_ctrl2, ctrl2; |
6343 | u32 old_ctrl4, ctrl4; |
6344 | |
6345 | old_ctrl0 = ctrl0 = readl(addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
6346 | old_ctrl2 = ctrl2 = readl(addr: port->base + MVPP2_GMAC_CTRL_2_REG); |
6347 | old_ctrl4 = ctrl4 = readl(addr: port->base + MVPP22_GMAC_CTRL_4_REG); |
6348 | |
6349 | ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; |
6350 | ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK); |
6351 | |
6352 | /* Configure port type */ |
6353 | if (phy_interface_mode_is_8023z(mode: state->interface)) { |
6354 | ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; |
6355 | ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; |
6356 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | |
6357 | MVPP22_CTRL4_DP_CLK_SEL | |
6358 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; |
6359 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
6360 | ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; |
6361 | ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; |
6362 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | |
6363 | MVPP22_CTRL4_DP_CLK_SEL | |
6364 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; |
6365 | } else if (phy_interface_mode_is_rgmii(mode: state->interface)) { |
6366 | ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; |
6367 | ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | |
6368 | MVPP22_CTRL4_SYNC_BYPASS_DIS | |
6369 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; |
6370 | } |
6371 | |
6372 | /* Configure negotiation style */ |
6373 | if (!phylink_autoneg_inband(mode)) { |
6374 | /* Phy or fixed speed - no in-band AN, nothing to do, leave the |
6375 | * configured speed, duplex and flow control as-is. |
6376 | */ |
6377 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
6378 | /* SGMII in-band mode receives the speed and duplex from |
6379 | * the PHY. Flow control information is not received. */ |
6380 | } else if (phy_interface_mode_is_8023z(mode: state->interface)) { |
6381 | /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can |
6382 | * they negotiate duplex: they are always operating with a fixed |
6383 | * speed of 1000/2500Mbps in full duplex, so force 1000/2500 |
6384 | * speed and full duplex here. |
6385 | */ |
6386 | ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; |
6387 | } |
6388 | |
6389 | if (old_ctrl0 != ctrl0) |
6390 | writel(val: ctrl0, addr: port->base + MVPP2_GMAC_CTRL_0_REG); |
6391 | if (old_ctrl2 != ctrl2) |
6392 | writel(val: ctrl2, addr: port->base + MVPP2_GMAC_CTRL_2_REG); |
6393 | if (old_ctrl4 != ctrl4) |
6394 | writel(val: ctrl4, addr: port->base + MVPP22_GMAC_CTRL_4_REG); |
6395 | } |
6396 | |
6397 | static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, |
6398 | phy_interface_t interface) |
6399 | { |
6400 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
6401 | |
6402 | /* Select the appropriate PCS operations depending on the |
6403 | * configured interface mode. We will only switch to a mode |
6404 | * that the validate() checks have already passed. |
6405 | */ |
6406 | if (mvpp2_is_xlg(interface)) |
6407 | return &port->pcs_xlg; |
6408 | else |
6409 | return &port->pcs_gmac; |
6410 | } |
6411 | |
6412 | static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, |
6413 | phy_interface_t interface) |
6414 | { |
6415 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
6416 | |
6417 | /* Check for invalid configuration */ |
6418 | if (mvpp2_is_xlg(interface) && port->gop_id != 0) { |
6419 | netdev_err(dev: port->dev, format: "Invalid mode on %s\n" , port->dev->name); |
6420 | return -EINVAL; |
6421 | } |
6422 | |
6423 | if (port->phy_interface != interface || |
6424 | phylink_autoneg_inband(mode)) { |
6425 | /* Force the link down when changing the interface or if in |
6426 | * in-band mode to ensure we do not change the configuration |
6427 | * while the hardware is indicating link is up. We force both |
6428 | * XLG and GMAC down to ensure that they're both in a known |
6429 | * state. |
6430 | */ |
6431 | mvpp2_modify(ptr: port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
6432 | MVPP2_GMAC_FORCE_LINK_PASS | |
6433 | MVPP2_GMAC_FORCE_LINK_DOWN, |
6434 | MVPP2_GMAC_FORCE_LINK_DOWN); |
6435 | |
6436 | if (mvpp2_port_supports_xlg(port)) |
6437 | mvpp2_modify(ptr: port->base + MVPP22_XLG_CTRL0_REG, |
6438 | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
6439 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, |
6440 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); |
6441 | } |
6442 | |
6443 | /* Make sure the port is disabled when reconfiguring the mode */ |
6444 | mvpp2_port_disable(port); |
6445 | |
6446 | if (port->phy_interface != interface) { |
6447 | /* Place GMAC into reset */ |
6448 | mvpp2_modify(ptr: port->base + MVPP2_GMAC_CTRL_2_REG, |
6449 | MVPP2_GMAC_PORT_RESET_MASK, |
6450 | MVPP2_GMAC_PORT_RESET_MASK); |
6451 | |
6452 | if (port->priv->hw_version >= MVPP22) { |
6453 | mvpp22_gop_mask_irq(port); |
6454 | |
6455 | phy_power_off(phy: port->comphy); |
6456 | |
6457 | /* Reconfigure the serdes lanes */ |
6458 | mvpp22_mode_reconfigure(port, interface); |
6459 | } |
6460 | } |
6461 | |
6462 | return 0; |
6463 | } |
6464 | |
6465 | static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, |
6466 | const struct phylink_link_state *state) |
6467 | { |
6468 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
6469 | |
6470 | /* mac (re)configuration */ |
6471 | if (mvpp2_is_xlg(interface: state->interface)) |
6472 | mvpp2_xlg_config(port, mode, state); |
6473 | else if (phy_interface_mode_is_rgmii(mode: state->interface) || |
6474 | phy_interface_mode_is_8023z(mode: state->interface) || |
6475 | state->interface == PHY_INTERFACE_MODE_SGMII) |
6476 | mvpp2_gmac_config(port, mode, state); |
6477 | |
6478 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) |
6479 | mvpp2_port_loopback_set(port, state); |
6480 | } |
6481 | |
6482 | static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, |
6483 | phy_interface_t interface) |
6484 | { |
6485 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
6486 | |
6487 | if (port->priv->hw_version >= MVPP22 && |
6488 | port->phy_interface != interface) { |
6489 | port->phy_interface = interface; |
6490 | |
6491 | /* Unmask interrupts */ |
6492 | mvpp22_gop_unmask_irq(port); |
6493 | } |
6494 | |
6495 | if (!mvpp2_is_xlg(interface)) { |
6496 | /* Release GMAC reset and wait */ |
6497 | mvpp2_modify(ptr: port->base + MVPP2_GMAC_CTRL_2_REG, |
6498 | MVPP2_GMAC_PORT_RESET_MASK, set: 0); |
6499 | |
6500 | while (readl(addr: port->base + MVPP2_GMAC_CTRL_2_REG) & |
6501 | MVPP2_GMAC_PORT_RESET_MASK) |
6502 | continue; |
6503 | } |
6504 | |
6505 | mvpp2_port_enable(port); |
6506 | |
6507 | /* Allow the link to come up if in in-band mode, otherwise the |
6508 | * link is forced via mac_link_down()/mac_link_up() |
6509 | */ |
6510 | if (phylink_autoneg_inband(mode)) { |
6511 | if (mvpp2_is_xlg(interface)) |
6512 | mvpp2_modify(ptr: port->base + MVPP22_XLG_CTRL0_REG, |
6513 | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
6514 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, set: 0); |
6515 | else |
6516 | mvpp2_modify(ptr: port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
6517 | MVPP2_GMAC_FORCE_LINK_PASS | |
6518 | MVPP2_GMAC_FORCE_LINK_DOWN, set: 0); |
6519 | } |
6520 | |
6521 | return 0; |
6522 | } |
6523 | |
6524 | static void mvpp2_mac_link_up(struct phylink_config *config, |
6525 | struct phy_device *phy, |
6526 | unsigned int mode, phy_interface_t interface, |
6527 | int speed, int duplex, |
6528 | bool tx_pause, bool rx_pause) |
6529 | { |
6530 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
6531 | u32 val; |
6532 | int i; |
6533 | |
6534 | if (mvpp2_is_xlg(interface)) { |
6535 | if (!phylink_autoneg_inband(mode)) { |
6536 | val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
6537 | if (tx_pause) |
6538 | val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; |
6539 | if (rx_pause) |
6540 | val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; |
6541 | |
6542 | mvpp2_modify(ptr: port->base + MVPP22_XLG_CTRL0_REG, |
6543 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | |
6544 | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
6545 | MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | |
6546 | MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, set: val); |
6547 | } |
6548 | } else { |
6549 | if (!phylink_autoneg_inband(mode)) { |
6550 | val = MVPP2_GMAC_FORCE_LINK_PASS; |
6551 | |
6552 | if (speed == SPEED_1000 || speed == SPEED_2500) |
6553 | val |= MVPP2_GMAC_CONFIG_GMII_SPEED; |
6554 | else if (speed == SPEED_100) |
6555 | val |= MVPP2_GMAC_CONFIG_MII_SPEED; |
6556 | |
6557 | if (duplex == DUPLEX_FULL) |
6558 | val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
6559 | |
6560 | mvpp2_modify(ptr: port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
6561 | MVPP2_GMAC_FORCE_LINK_DOWN | |
6562 | MVPP2_GMAC_FORCE_LINK_PASS | |
6563 | MVPP2_GMAC_CONFIG_MII_SPEED | |
6564 | MVPP2_GMAC_CONFIG_GMII_SPEED | |
6565 | MVPP2_GMAC_CONFIG_FULL_DUPLEX, set: val); |
6566 | } |
6567 | |
6568 | /* We can always update the flow control enable bits; |
6569 | * these will only be effective if flow control AN |
6570 | * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. |
6571 | */ |
6572 | val = 0; |
6573 | if (tx_pause) |
6574 | val |= MVPP22_CTRL4_TX_FC_EN; |
6575 | if (rx_pause) |
6576 | val |= MVPP22_CTRL4_RX_FC_EN; |
6577 | |
6578 | mvpp2_modify(ptr: port->base + MVPP22_GMAC_CTRL_4_REG, |
6579 | MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, |
6580 | set: val); |
6581 | } |
6582 | |
6583 | if (port->priv->global_tx_fc) { |
6584 | port->tx_fc = tx_pause; |
6585 | if (tx_pause) |
6586 | mvpp2_rxq_enable_fc(port); |
6587 | else |
6588 | mvpp2_rxq_disable_fc(port); |
6589 | if (port->priv->percpu_pools) { |
6590 | for (i = 0; i < port->nrxqs; i++) |
6591 | mvpp2_bm_pool_update_fc(port, pool: &port->priv->bm_pools[i], en: tx_pause); |
6592 | } else { |
6593 | mvpp2_bm_pool_update_fc(port, pool: port->pool_long, en: tx_pause); |
6594 | mvpp2_bm_pool_update_fc(port, pool: port->pool_short, en: tx_pause); |
6595 | } |
6596 | if (port->priv->hw_version == MVPP23) |
6597 | mvpp23_rx_fifo_fc_en(priv: port->priv, port: port->id, en: tx_pause); |
6598 | } |
6599 | |
6600 | mvpp2_port_enable(port); |
6601 | |
6602 | mvpp2_egress_enable(port); |
6603 | mvpp2_ingress_enable(port); |
6604 | netif_tx_wake_all_queues(dev: port->dev); |
6605 | } |
6606 | |
6607 | static void mvpp2_mac_link_down(struct phylink_config *config, |
6608 | unsigned int mode, phy_interface_t interface) |
6609 | { |
6610 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
6611 | u32 val; |
6612 | |
6613 | if (!phylink_autoneg_inband(mode)) { |
6614 | if (mvpp2_is_xlg(interface)) { |
6615 | val = readl(addr: port->base + MVPP22_XLG_CTRL0_REG); |
6616 | val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
6617 | val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; |
6618 | writel(val, addr: port->base + MVPP22_XLG_CTRL0_REG); |
6619 | } else { |
6620 | val = readl(addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6621 | val &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
6622 | val |= MVPP2_GMAC_FORCE_LINK_DOWN; |
6623 | writel(val, addr: port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6624 | } |
6625 | } |
6626 | |
6627 | netif_tx_stop_all_queues(dev: port->dev); |
6628 | mvpp2_egress_disable(port); |
6629 | mvpp2_ingress_disable(port); |
6630 | |
6631 | mvpp2_port_disable(port); |
6632 | } |
6633 | |
6634 | static const struct phylink_mac_ops mvpp2_phylink_ops = { |
6635 | .mac_select_pcs = mvpp2_select_pcs, |
6636 | .mac_prepare = mvpp2_mac_prepare, |
6637 | .mac_config = mvpp2_mac_config, |
6638 | .mac_finish = mvpp2_mac_finish, |
6639 | .mac_link_up = mvpp2_mac_link_up, |
6640 | .mac_link_down = mvpp2_mac_link_down, |
6641 | }; |
6642 | |
6643 | /* Work-around for ACPI */ |
6644 | static void mvpp2_acpi_start(struct mvpp2_port *port) |
6645 | { |
6646 | /* Phylink isn't used for ACPI yet, so the MAC has to be configured |
6647 | * manually when the interface is started. This will be removed as |
6648 | * soon as phylink ACPI support lands. |
6649 | */ |
6650 | struct phylink_link_state state = { |
6651 | .interface = port->phy_interface, |
6652 | }; |
6653 | struct phylink_pcs *pcs; |
6654 | |
6655 | pcs = mvpp2_select_pcs(config: &port->phylink_config, interface: port->phy_interface); |
6656 | |
6657 | mvpp2_mac_prepare(config: &port->phylink_config, mode: MLO_AN_INBAND, |
6658 | interface: port->phy_interface); |
6659 | mvpp2_mac_config(config: &port->phylink_config, mode: MLO_AN_INBAND, state: &state); |
6660 | pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED, |
6661 | port->phy_interface, state.advertising, |
6662 | false); |
6663 | mvpp2_mac_finish(config: &port->phylink_config, mode: MLO_AN_INBAND, |
6664 | interface: port->phy_interface); |
6665 | mvpp2_mac_link_up(config: &port->phylink_config, NULL, |
6666 | mode: MLO_AN_INBAND, interface: port->phy_interface, |
6667 | SPEED_UNKNOWN, DUPLEX_UNKNOWN, tx_pause: false, rx_pause: false); |
6668 | } |
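/* Note: the call sequence in mvpp2_acpi_start() above roughly mirrors the
 * major-config steps phylink itself performs for an in-band link: select
 * the PCS, then mac_prepare(), mac_config(), pcs_config(), mac_finish()
 * and finally mac_link_up().
 */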
6669 | |
6670 | /* To ensure backward compatibility for ACPI, check whether the port |
6671 | * firmware node contains the description necessary to use phylink. |
6672 | */ |
6673 | static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode) |
6674 | { |
6675 | if (!is_acpi_node(fwnode: port_fwnode)) |
6676 | return false; |
6677 | |
6678 | return (!fwnode_property_present(fwnode: port_fwnode, propname: "phy-handle" ) && |
6679 | !fwnode_property_present(fwnode: port_fwnode, propname: "managed" ) && |
6680 | !fwnode_get_named_child_node(fwnode: port_fwnode, childname: "fixed-link" )); |
6681 | } |
6682 | |
6683 | /* Ports initialization */ |
6684 | static int mvpp2_port_probe(struct platform_device *pdev, |
6685 | struct fwnode_handle *port_fwnode, |
6686 | struct mvpp2 *priv) |
6687 | { |
6688 | struct phy *comphy = NULL; |
6689 | struct mvpp2_port *port; |
6690 | struct mvpp2_port_pcpu *port_pcpu; |
6691 | struct device_node *port_node = to_of_node(port_fwnode); |
6692 | netdev_features_t features; |
6693 | struct net_device *dev; |
6694 | struct phylink *phylink; |
6695 | char *mac_from = "" ; |
6696 | unsigned int ntxqs, nrxqs, thread; |
6697 | unsigned long flags = 0; |
6698 | bool has_tx_irqs; |
6699 | u32 id; |
6700 | int phy_mode; |
6701 | int err, i; |
6702 | |
6703 | has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, flags: &flags); |
6704 | if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { |
6705 | dev_err(&pdev->dev, |
6706 | "not enough IRQs to support multi queue mode\n" ); |
6707 | return -EINVAL; |
6708 | } |
6709 | |
6710 | ntxqs = MVPP2_MAX_TXQ; |
6711 | nrxqs = mvpp2_get_nrxqs(priv); |
6712 | |
6713 | dev = alloc_etherdev_mqs(sizeof_priv: sizeof(*port), txqs: ntxqs, rxqs: nrxqs); |
6714 | if (!dev) |
6715 | return -ENOMEM; |
6716 | |
6717 | phy_mode = fwnode_get_phy_mode(fwnode: port_fwnode); |
6718 | if (phy_mode < 0) { |
6719 | dev_err(&pdev->dev, "incorrect phy mode\n" ); |
6720 | err = phy_mode; |
6721 | goto err_free_netdev; |
6722 | } |
6723 | |
6724 | /* |
6725 | * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. |
6726 | * Existing usage of 10GBASE-KR is not correct; no backplane |
6727 | * negotiation is done, and this driver does not actually support |
6728 | * 10GBASE-KR. |
6729 | */ |
6730 | if (phy_mode == PHY_INTERFACE_MODE_10GKR) |
6731 | phy_mode = PHY_INTERFACE_MODE_10GBASER; |
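/* For illustration, a hypothetical legacy DT node such as:
 *
 *	eth0 {
 *		phy-mode = "10gbase-kr";
 *	};
 *
 * is therefore handled exactly as if it said phy-mode = "10gbase-r".
 * The node name is made up; only the phy-mode value matters here.
 */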
6732 | |
6733 | if (port_node) { |
6734 | comphy = devm_of_phy_get(dev: &pdev->dev, np: port_node, NULL); |
6735 | if (IS_ERR(ptr: comphy)) { |
6736 | if (PTR_ERR(ptr: comphy) == -EPROBE_DEFER) { |
6737 | err = -EPROBE_DEFER; |
6738 | goto err_free_netdev; |
6739 | } |
6740 | comphy = NULL; |
6741 | } |
6742 | } |
6743 | |
6744 | if (fwnode_property_read_u32(fwnode: port_fwnode, propname: "port-id" , val: &id)) { |
6745 | err = -EINVAL; |
6746 | dev_err(&pdev->dev, "missing port-id value\n" ); |
6747 | goto err_free_netdev; |
6748 | } |
6749 | |
6750 | dev->tx_queue_len = MVPP2_MAX_TXD_MAX; |
6751 | dev->watchdog_timeo = 5 * HZ; |
6752 | dev->netdev_ops = &mvpp2_netdev_ops; |
6753 | dev->ethtool_ops = &mvpp2_eth_tool_ops; |
6754 | |
6755 | port = netdev_priv(dev); |
6756 | port->dev = dev; |
6757 | port->fwnode = port_fwnode; |
6758 | port->ntxqs = ntxqs; |
6759 | port->nrxqs = nrxqs; |
6760 | port->priv = priv; |
6761 | port->has_tx_irqs = has_tx_irqs; |
6762 | port->flags = flags; |
6763 | |
6764 | err = mvpp2_queue_vectors_init(port, port_node); |
6765 | if (err) |
6766 | goto err_free_netdev; |
6767 | |
6768 | if (port_node) |
6769 | port->port_irq = of_irq_get_byname(dev: port_node, name: "link" ); |
6770 | else |
6771 | port->port_irq = fwnode_irq_get(fwnode: port_fwnode, index: port->nqvecs + 1); |
6772 | if (port->port_irq == -EPROBE_DEFER) { |
6773 | err = -EPROBE_DEFER; |
6774 | goto err_deinit_qvecs; |
6775 | } |
6776 | if (port->port_irq <= 0) |
6777 | /* the link irq is optional */ |
6778 | port->port_irq = 0; |
6779 | |
6780 | if (fwnode_property_read_bool(fwnode: port_fwnode, propname: "marvell,loopback" )) |
6781 | port->flags |= MVPP2_F_LOOPBACK; |
6782 | |
6783 | port->id = id; |
6784 | if (priv->hw_version == MVPP21) |
6785 | port->first_rxq = port->id * port->nrxqs; |
6786 | else |
6787 | port->first_rxq = port->id * priv->max_port_rxqs; |
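/* Worked example (rxq counts assumed): on PPv2.1 with 4 rxqs per port,
 * port 2 starts at rxq 2 * 4 = 8; on PPv2.2/2.3, where max_port_rxqs
 * is 32, port 2 starts at rxq 2 * 32 = 64.
 */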
6788 | |
6789 | port->of_node = port_node; |
6790 | port->phy_interface = phy_mode; |
6791 | port->comphy = comphy; |
6792 | |
6793 | if (priv->hw_version == MVPP21) { |
6794 | port->base = devm_platform_ioremap_resource(pdev, index: 2 + id); |
6795 | if (IS_ERR(ptr: port->base)) { |
6796 | err = PTR_ERR(ptr: port->base); |
6797 | goto err_free_irq; |
6798 | } |
6799 | |
6800 | port->stats_base = port->priv->lms_base + |
6801 | MVPP21_MIB_COUNTERS_OFFSET + |
6802 | port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; |
6803 | } else { |
6804 | if (fwnode_property_read_u32(fwnode: port_fwnode, propname: "gop-port-id" , |
6805 | val: &port->gop_id)) { |
6806 | err = -EINVAL; |
6807 | dev_err(&pdev->dev, "missing gop-port-id value\n" ); |
6808 | goto err_deinit_qvecs; |
6809 | } |
6810 | |
6811 | port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); |
6812 | port->stats_base = port->priv->iface_base + |
6813 | MVPP22_MIB_COUNTERS_OFFSET + |
6814 | port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; |
6815 | |
6816 | /* We may want a property to describe whether we should use |
6817 | * MAC hardware timestamping. |
6818 | */ |
6819 | if (priv->tai) |
6820 | port->hwtstamp = true; |
6821 | } |
6822 | |
6823 | /* Alloc per-cpu and ethtool stats */ |
6824 | port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); |
6825 | if (!port->stats) { |
6826 | err = -ENOMEM; |
6827 | goto err_free_irq; |
6828 | } |
6829 | |
6830 | port->ethtool_stats = devm_kcalloc(dev: &pdev->dev, |
6831 | MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), |
6832 | size: sizeof(u64), GFP_KERNEL); |
6833 | if (!port->ethtool_stats) { |
6834 | err = -ENOMEM; |
6835 | goto err_free_stats; |
6836 | } |
6837 | |
6838 | mutex_init(&port->gather_stats_lock); |
6839 | INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); |
6840 | |
6841 | err = mvpp2_port_copy_mac_addr(dev, priv, fwnode: port_fwnode, mac_from: &mac_from); |
6842 | if (err < 0) |
6843 | goto err_free_stats; |
6844 | |
6845 | port->tx_ring_size = MVPP2_MAX_TXD_DFLT; |
6846 | port->rx_ring_size = MVPP2_MAX_RXD_DFLT; |
6847 | SET_NETDEV_DEV(dev, &pdev->dev); |
6848 | |
6849 | err = mvpp2_port_init(port); |
6850 | if (err < 0) { |
6851 | dev_err(&pdev->dev, "failed to init port %d\n" , id); |
6852 | goto err_free_stats; |
6853 | } |
6854 | |
6855 | mvpp2_port_periodic_xon_disable(port); |
6856 | |
6857 | mvpp2_mac_reset_assert(port); |
6858 | mvpp22_pcs_reset_assert(port); |
6859 | |
6860 | port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); |
6861 | if (!port->pcpu) { |
6862 | err = -ENOMEM; |
6863 | goto err_free_txq_pcpu; |
6864 | } |
6865 | |
6866 | if (!port->has_tx_irqs) { |
6867 | for (thread = 0; thread < priv->nthreads; thread++) { |
6868 | port_pcpu = per_cpu_ptr(port->pcpu, thread); |
6869 | |
6870 | hrtimer_init(timer: &port_pcpu->tx_done_timer, CLOCK_MONOTONIC, |
6871 | mode: HRTIMER_MODE_REL_PINNED_SOFT); |
6872 | port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; |
6873 | port_pcpu->timer_scheduled = false; |
6874 | port_pcpu->dev = dev; |
6875 | } |
6876 | } |
6877 | |
6878 | features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
6879 | NETIF_F_TSO; |
6880 | dev->features = features | NETIF_F_RXCSUM; |
6881 | dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | |
6882 | NETIF_F_HW_VLAN_CTAG_FILTER; |
6883 | |
6884 | if (mvpp22_rss_is_supported(port)) { |
6885 | dev->hw_features |= NETIF_F_RXHASH; |
6886 | dev->features |= NETIF_F_NTUPLE; |
6887 | } |
6888 | |
6889 | if (!port->priv->percpu_pools) |
6890 | mvpp2_set_hw_csum(port, new_long_pool: port->pool_long->id); |
6891 | else if (port->ntxqs >= num_possible_cpus() * 2) |
6892 | dev->xdp_features = NETDEV_XDP_ACT_BASIC | |
6893 | NETDEV_XDP_ACT_REDIRECT | |
6894 | NETDEV_XDP_ACT_NDO_XMIT; |
6895 | |
6896 | dev->vlan_features |= features; |
6897 | netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); |
6898 | |
6899 | dev->priv_flags |= IFF_UNICAST_FLT; |
6900 | |
6901 | /* MTU range: 68 - 9704 */ |
6902 | dev->min_mtu = ETH_MIN_MTU; |
6903 | /* 9704 == 9728 - 20, rounded down to a multiple of 8 */ |
6904 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; |
6905 | device_set_node(dev: &dev->dev, fwnode: port_fwnode); |
6906 | |
6907 | port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; |
6908 | port->pcs_gmac.neg_mode = true; |
6909 | port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; |
6910 | port->pcs_xlg.neg_mode = true; |
6911 | |
6912 | if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { |
6913 | port->phylink_config.dev = &dev->dev; |
6914 | port->phylink_config.type = PHYLINK_NETDEV; |
6915 | port->phylink_config.mac_capabilities = |
6916 | MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; |
6917 | |
6918 | if (port->priv->global_tx_fc) |
6919 | port->phylink_config.mac_capabilities |= |
6920 | MAC_SYM_PAUSE | MAC_ASYM_PAUSE; |
6921 | |
6922 | if (mvpp2_port_supports_xlg(port)) { |
6923 | /* If a COMPHY is present, we can support any of |
6924 | * the serdes modes and switch between them. |
6925 | */ |
6926 | if (comphy) { |
6927 | __set_bit(PHY_INTERFACE_MODE_5GBASER, |
6928 | port->phylink_config.supported_interfaces); |
6929 | __set_bit(PHY_INTERFACE_MODE_10GBASER, |
6930 | port->phylink_config.supported_interfaces); |
6931 | __set_bit(PHY_INTERFACE_MODE_XAUI, |
6932 | port->phylink_config.supported_interfaces); |
6933 | } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { |
6934 | __set_bit(PHY_INTERFACE_MODE_5GBASER, |
6935 | port->phylink_config.supported_interfaces); |
6936 | } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { |
6937 | __set_bit(PHY_INTERFACE_MODE_10GBASER, |
6938 | port->phylink_config.supported_interfaces); |
6939 | } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { |
6940 | __set_bit(PHY_INTERFACE_MODE_XAUI, |
6941 | port->phylink_config.supported_interfaces); |
6942 | } |
6943 | |
6944 | if (comphy) |
6945 | port->phylink_config.mac_capabilities |= |
6946 | MAC_10000FD | MAC_5000FD; |
6947 | else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) |
6948 | port->phylink_config.mac_capabilities |= |
6949 | MAC_5000FD; |
6950 | else |
6951 | port->phylink_config.mac_capabilities |= |
6952 | MAC_10000FD; |
6953 | } |
6954 | |
6955 | if (mvpp2_port_supports_rgmii(port)) { |
6956 | phy_interface_set_rgmii(intf: port->phylink_config.supported_interfaces); |
6957 | __set_bit(PHY_INTERFACE_MODE_MII, |
6958 | port->phylink_config.supported_interfaces); |
6959 | } |
6960 | |
6961 | if (comphy) { |
6962 | /* If a COMPHY is present, we can support any of the |
6963 | * serdes modes and switch between them. |
6964 | */ |
6965 | __set_bit(PHY_INTERFACE_MODE_SGMII, |
6966 | port->phylink_config.supported_interfaces); |
6967 | __set_bit(PHY_INTERFACE_MODE_1000BASEX, |
6968 | port->phylink_config.supported_interfaces); |
6969 | __set_bit(PHY_INTERFACE_MODE_2500BASEX, |
6970 | port->phylink_config.supported_interfaces); |
6971 | } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { |
6972 | /* No COMPHY, with only 2500BASE-X mode supported */ |
6973 | __set_bit(PHY_INTERFACE_MODE_2500BASEX, |
6974 | port->phylink_config.supported_interfaces); |
6975 | } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || |
6976 | phy_mode == PHY_INTERFACE_MODE_SGMII) { |
6977 | /* No COMPHY, we can switch between 1000BASE-X and SGMII |
6978 | */ |
6979 | __set_bit(PHY_INTERFACE_MODE_1000BASEX, |
6980 | port->phylink_config.supported_interfaces); |
6981 | __set_bit(PHY_INTERFACE_MODE_SGMII, |
6982 | port->phylink_config.supported_interfaces); |
6983 | } |
6984 | |
6985 | phylink = phylink_create(&port->phylink_config, port_fwnode, |
6986 | phy_mode, &mvpp2_phylink_ops); |
6987 | if (IS_ERR(ptr: phylink)) { |
6988 | err = PTR_ERR(ptr: phylink); |
6989 | goto err_free_port_pcpu; |
6990 | } |
6991 | port->phylink = phylink; |
6992 | } else { |
6993 | dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n" , port->id); |
6994 | port->phylink = NULL; |
6995 | } |
6996 | |
6997 | /* Cycle the comphy to power it down, saving 270mW per port - |
6998 | * don't worry about an error powering it up. When the comphy |
6999 | * driver does this, we can remove this code. |
7000 | */ |
7001 | if (port->comphy) { |
7002 | err = mvpp22_comphy_init(port, interface: port->phy_interface); |
7003 | if (err == 0) |
7004 | phy_power_off(phy: port->comphy); |
7005 | } |
7006 | |
7007 | err = register_netdev(dev); |
7008 | if (err < 0) { |
7009 | dev_err(&pdev->dev, "failed to register netdev\n" ); |
7010 | goto err_phylink; |
7011 | } |
7012 | netdev_info(dev, format: "Using %s mac address %pM\n" , mac_from, dev->dev_addr); |
7013 | |
7014 | priv->port_list[priv->port_count++] = port; |
7015 | |
7016 | return 0; |
7017 | |
7018 | err_phylink: |
7019 | if (port->phylink) |
7020 | phylink_destroy(port->phylink); |
7021 | err_free_port_pcpu: |
7022 | free_percpu(pdata: port->pcpu); |
7023 | err_free_txq_pcpu: |
7024 | for (i = 0; i < port->ntxqs; i++) |
7025 | free_percpu(pdata: port->txqs[i]->pcpu); |
7026 | err_free_stats: |
7027 | free_percpu(pdata: port->stats); |
7028 | err_free_irq: |
7029 | if (port->port_irq) |
7030 | irq_dispose_mapping(virq: port->port_irq); |
7031 | err_deinit_qvecs: |
7032 | mvpp2_queue_vectors_deinit(port); |
7033 | err_free_netdev: |
7034 | free_netdev(dev); |
7035 | return err; |
7036 | } |
7037 | |
7038 | /* Ports removal routine */ |
7039 | static void mvpp2_port_remove(struct mvpp2_port *port) |
7040 | { |
7041 | int i; |
7042 | |
7043 | unregister_netdev(dev: port->dev); |
7044 | if (port->phylink) |
7045 | phylink_destroy(port->phylink); |
7046 | free_percpu(pdata: port->pcpu); |
7047 | free_percpu(pdata: port->stats); |
7048 | for (i = 0; i < port->ntxqs; i++) |
7049 | free_percpu(pdata: port->txqs[i]->pcpu); |
7050 | mvpp2_queue_vectors_deinit(port); |
7051 | if (port->port_irq) |
7052 | irq_dispose_mapping(virq: port->port_irq); |
7053 | free_netdev(dev: port->dev); |
7054 | } |
7055 | |
7056 | /* Initialize decoding windows */ |
7057 | static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, |
7058 | struct mvpp2 *priv) |
7059 | { |
7060 | u32 win_enable; |
7061 | int i; |
7062 | |
7063 | for (i = 0; i < 6; i++) { |
7064 | mvpp2_write(priv, MVPP2_WIN_BASE(i), data: 0); |
7065 | mvpp2_write(priv, MVPP2_WIN_SIZE(i), data: 0); |
7066 | |
7067 | if (i < 4) |
7068 | mvpp2_write(priv, MVPP2_WIN_REMAP(i), data: 0); |
7069 | } |
7070 | |
7071 | win_enable = 0; |
7072 | |
7073 | for (i = 0; i < dram->num_cs; i++) { |
7074 | const struct mbus_dram_window *cs = dram->cs + i; |
7075 | |
7076 | mvpp2_write(priv, MVPP2_WIN_BASE(i), |
7077 | data: (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | |
7078 | dram->mbus_dram_target_id); |
7079 | |
7080 | mvpp2_write(priv, MVPP2_WIN_SIZE(i), |
7081 | data: (cs->size - 1) & 0xffff0000); |
7082 | |
7083 | win_enable |= (1 << i); |
7084 | } |
7085 | |
7086 | mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, data: win_enable); |
7087 | } |
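/* Illustrative example for the window programming above (made-up
 * values): a DRAM CS with base 0x00000000, mbus_attr 0x0e and target
 * id 0 yields WIN_BASE = 0x00000000 | (0x0e << 8) | 0 = 0x00000e00,
 * and a 256MB CS yields WIN_SIZE = (0x10000000 - 1) & 0xffff0000 =
 * 0x0fff0000.
 */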
7088 | |
7089 | /* Initialize Rx FIFOs */ |
7090 | static void mvpp2_rx_fifo_init(struct mvpp2 *priv) |
7091 | { |
7092 | int port; |
7093 | |
7094 | for (port = 0; port < MVPP2_MAX_PORTS; port++) { |
7095 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), |
7096 | MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); |
7097 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), |
7098 | MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); |
7099 | } |
7100 | |
7101 | mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, |
7102 | MVPP2_RX_FIFO_PORT_MIN_PKT); |
7103 | mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, data: 0x1); |
7104 | } |
7105 | |
7106 | static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size) |
7107 | { |
7108 | int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size); |
7109 | |
7110 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data: data_size); |
7111 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), data: attr_size); |
7112 | } |
7113 | |
7114 | /* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3. |
7115 | * 4kB fixed space must be assigned for the loopback port. |
7116 | * Redistribute the remaining available 44kB space among all active ports. |
7117 | * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G |
7118 | * SGMII link. |
7119 | */ |
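/* Worked example (hypothetical port set): with ports 0, 1 and 2 active,
 * the loop below assigns port 0 max(44/3, 32) = 32kB, then port 1
 * max(12/2, 8) = 8kB, and port 2, being last, the remaining 4kB.
 */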
7120 | static void mvpp22_rx_fifo_init(struct mvpp2 *priv) |
7121 | { |
7122 | int remaining_ports_count; |
7123 | unsigned long port_map; |
7124 | int size_remainder; |
7125 | int port, size; |
7126 | |
7127 | /* The loopback requires fixed 4kB of the FIFO space assignment. */ |
7128 | mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, |
7129 | MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); |
7130 | port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); |
7131 | |
7132 | /* Set RX FIFO size to 0 for inactive ports. */ |
7133 | for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) |
7134 | mvpp22_rx_fifo_set_hw(priv, port, data_size: 0); |
7135 | |
7136 | /* Assign remaining RX FIFO space among all active ports. */ |
7137 | size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB; |
7138 | remaining_ports_count = hweight_long(w: port_map); |
7139 | |
7140 | for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { |
7141 | if (remaining_ports_count == 1) |
7142 | size = size_remainder; |
7143 | else if (port == 0) |
7144 | size = max(size_remainder / remaining_ports_count, |
7145 | MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); |
7146 | else if (port == 1) |
7147 | size = max(size_remainder / remaining_ports_count, |
7148 | MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); |
7149 | else |
7150 | size = size_remainder / remaining_ports_count; |
7151 | |
7152 | size_remainder -= size; |
7153 | remaining_ports_count--; |
7154 | |
7155 | mvpp22_rx_fifo_set_hw(priv, port, data_size: size); |
7156 | } |
7157 | |
7158 | mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, |
7159 | MVPP2_RX_FIFO_PORT_MIN_PKT); |
7160 | mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, data: 0x1); |
7161 | } |
7162 | |
7163 | /* Configure Rx FIFO Flow control thresholds */ |
7164 | static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv) |
7165 | { |
7166 | int port, val; |
7167 | |
7168 | /* Port 0: maximum speed - 10Gb/s port; |
7169 | * spec requires an RX FIFO threshold of 9KB |
7170 | * Port 1: maximum speed - 5Gb/s port; |
7171 | * spec requires an RX FIFO threshold of 4KB |
7172 | * Port 2: maximum speed - 1Gb/s port; |
7173 | * spec requires an RX FIFO threshold of 2KB |
7174 | */ |
7175 | |
7176 | /* Without loopback port */ |
7177 | for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) { |
7178 | if (port == 0) { |
7179 | val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) |
7180 | << MVPP2_RX_FC_TRSH_OFFS; |
7181 | val &= MVPP2_RX_FC_TRSH_MASK; |
7182 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), data: val); |
7183 | } else if (port == 1) { |
7184 | val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) |
7185 | << MVPP2_RX_FC_TRSH_OFFS; |
7186 | val &= MVPP2_RX_FC_TRSH_MASK; |
7187 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), data: val); |
7188 | } else { |
7189 | val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) |
7190 | << MVPP2_RX_FC_TRSH_OFFS; |
7191 | val &= MVPP2_RX_FC_TRSH_MASK; |
7192 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), data: val); |
7193 | } |
7194 | } |
7195 | } |
7196 | |
7197 | /* Enable/disable Rx FIFO flow control for a given port */ |
7198 | void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en) |
7199 | { |
7200 | int val; |
7201 | |
7202 | val = mvpp2_read(priv, MVPP2_RX_FC_REG(port)); |
7203 | |
7204 | if (en) |
7205 | val |= MVPP2_RX_FC_EN; |
7206 | else |
7207 | val &= ~MVPP2_RX_FC_EN; |
7208 | |
7209 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), data: val); |
7210 | } |
7211 | |
7212 | static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size) |
7213 | { |
7214 | int threshold = MVPP2_TX_FIFO_THRESHOLD(size); |
7215 | |
7216 | mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), data: size); |
7217 | mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), data: threshold); |
7218 | } |
7219 | |
7220 | /* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3. |
7221 | * 1kB fixed space must be assigned for the loopback port. |
7222 | * Redistribute the remaining available 18kB space among all active ports. |
7223 | * The 10G interface should use 10kB (which is maximum possible size |
7224 | * per single port). |
7225 | */ |
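/* Worked example (hypothetical port set): with ports 0, 1 and 2 active,
 * the loop below assigns port 0 its fixed 10kB, then port 1 gets
 * 8/2 = 4kB, and port 2, being last, min(4, 10) = the remaining 4kB.
 */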
7226 | static void mvpp22_tx_fifo_init(struct mvpp2 *priv) |
7227 | { |
7228 | int remaining_ports_count; |
7229 | unsigned long port_map; |
7230 | int size_remainder; |
7231 | int port, size; |
7232 | |
7233 | /* The loopback requires fixed 1kB of the FIFO space assignment. */ |
7234 | mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, |
7235 | MVPP22_TX_FIFO_DATA_SIZE_1KB); |
7236 | port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); |
7237 | |
7238 | /* Set TX FIFO size to 0 for inactive ports. */ |
7239 | for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) |
7240 | mvpp22_tx_fifo_set_hw(priv, port, size: 0); |
7241 | |
7242 | /* Assign remaining TX FIFO space among all active ports. */ |
7243 | size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; |
7244 | remaining_ports_count = hweight_long(w: port_map); |
7245 | |
7246 | for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { |
7247 | if (remaining_ports_count == 1) |
7248 | size = min(size_remainder, |
7249 | MVPP22_TX_FIFO_DATA_SIZE_10KB); |
7250 | else if (port == 0) |
7251 | size = MVPP22_TX_FIFO_DATA_SIZE_10KB; |
7252 | else |
7253 | size = size_remainder / remaining_ports_count; |
7254 | |
7255 | size_remainder -= size; |
7256 | remaining_ports_count--; |
7257 | |
7258 | mvpp22_tx_fifo_set_hw(priv, port, size); |
7259 | } |
7260 | } |
7261 | |
7262 | static void mvpp2_axi_init(struct mvpp2 *priv) |
7263 | { |
7264 | u32 val, rdval, wrval; |
7265 | |
7266 | mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, data: 0x0); |
7267 | |
7268 | /* AXI Bridge Configuration */ |
7269 | |
7270 | rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE |
7271 | << MVPP22_AXI_ATTR_CACHE_OFFS; |
7272 | rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM |
7273 | << MVPP22_AXI_ATTR_DOMAIN_OFFS; |
7274 | |
7275 | wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE |
7276 | << MVPP22_AXI_ATTR_CACHE_OFFS; |
7277 | wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM |
7278 | << MVPP22_AXI_ATTR_DOMAIN_OFFS; |
7279 | |
7280 | /* BM */ |
7281 | mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, data: wrval); |
7282 | mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, data: rdval); |
7283 | |
7284 | /* Descriptors */ |
7285 | mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, data: rdval); |
7286 | mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, data: wrval); |
7287 | mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, data: rdval); |
7288 | mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, data: wrval); |
7289 | |
7290 | /* Buffer Data */ |
7291 | mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, data: rdval); |
7292 | mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, data: wrval); |
7293 | |
7294 | val = MVPP22_AXI_CODE_CACHE_NON_CACHE |
7295 | << MVPP22_AXI_CODE_CACHE_OFFS; |
7296 | val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM |
7297 | << MVPP22_AXI_CODE_DOMAIN_OFFS; |
7298 | mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, data: val); |
7299 | mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, data: val); |
7300 | |
7301 | val = MVPP22_AXI_CODE_CACHE_RD_CACHE |
7302 | << MVPP22_AXI_CODE_CACHE_OFFS; |
7303 | val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM |
7304 | << MVPP22_AXI_CODE_DOMAIN_OFFS; |
7305 | |
7306 | mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, data: val); |
7307 | |
7308 | val = MVPP22_AXI_CODE_CACHE_WR_CACHE |
7309 | << MVPP22_AXI_CODE_CACHE_OFFS; |
7310 | val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM |
7311 | << MVPP22_AXI_CODE_DOMAIN_OFFS; |
7312 | |
7313 | mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, data: val); |
7314 | } |
7315 | |
7316 | /* Initialize network controller common part HW */ |
7317 | static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) |
7318 | { |
7319 | const struct mbus_dram_target_info *dram_target_info; |
7320 | int err, i; |
7321 | u32 val; |
7322 | |
7323 | /* MBUS windows configuration */ |
7324 | dram_target_info = mv_mbus_dram_info(); |
7325 | if (dram_target_info) |
7326 | mvpp2_conf_mbus_windows(dram: dram_target_info, priv); |
7327 | |
7328 | if (priv->hw_version >= MVPP22) |
7329 | mvpp2_axi_init(priv); |
7330 | |
7331 | /* Disable HW PHY polling */ |
7332 | if (priv->hw_version == MVPP21) { |
7333 | val = readl(addr: priv->lms_base + MVPP2_PHY_AN_CFG0_REG); |
7334 | val |= MVPP2_PHY_AN_STOP_SMI0_MASK; |
7335 | writel(val, addr: priv->lms_base + MVPP2_PHY_AN_CFG0_REG); |
7336 | } else { |
7337 | val = readl(addr: priv->iface_base + MVPP22_SMI_MISC_CFG_REG); |
7338 | val &= ~MVPP22_SMI_POLLING_EN; |
7339 | writel(val, addr: priv->iface_base + MVPP22_SMI_MISC_CFG_REG); |
7340 | } |
7341 | |
7342 | /* Allocate and initialize aggregated TXQs */ |
7343 | priv->aggr_txqs = devm_kcalloc(dev: &pdev->dev, MVPP2_MAX_THREADS, |
7344 | size: sizeof(*priv->aggr_txqs), |
7345 | GFP_KERNEL); |
7346 | if (!priv->aggr_txqs) |
7347 | return -ENOMEM; |
7348 | |
7349 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
7350 | priv->aggr_txqs[i].id = i; |
7351 | priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; |
7352 | err = mvpp2_aggr_txq_init(pdev, aggr_txq: &priv->aggr_txqs[i], thread: i, priv); |
7353 | if (err < 0) |
7354 | return err; |
7355 | } |
7356 | |
7357 | /* Fifo Init */ |
7358 | if (priv->hw_version == MVPP21) { |
7359 | mvpp2_rx_fifo_init(priv); |
7360 | } else { |
7361 | mvpp22_rx_fifo_init(priv); |
7362 | mvpp22_tx_fifo_init(priv); |
7363 | if (priv->hw_version == MVPP23) |
7364 | mvpp23_rx_fifo_fc_set_tresh(priv); |
7365 | } |
7366 | |
7367 | if (priv->hw_version == MVPP21) |
7368 | writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, |
7369 | addr: priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); |
7370 | |
7371 | /* Allow cache snoop when transmitting packets */ |
7372 | mvpp2_write(priv, MVPP2_TX_SNOOP_REG, data: 0x1); |
7373 | |
7374 | /* Buffer Manager initialization */ |
7375 | err = mvpp2_bm_init(dev: &pdev->dev, priv); |
7376 | if (err < 0) |
7377 | return err; |
7378 | |
7379 | /* Parser default initialization */ |
7380 | err = mvpp2_prs_default_init(pdev, priv); |
7381 | if (err < 0) |
7382 | return err; |
7383 | |
7384 | /* Classifier default initialization */ |
7385 | mvpp2_cls_init(priv); |
7386 | |
7387 | return 0; |
7388 | } |
7389 | |
7390 | static int mvpp2_get_sram(struct platform_device *pdev, |
7391 | struct mvpp2 *priv) |
7392 | { |
7393 | struct resource *res; |
7394 | void __iomem *base; |
7395 | |
7396 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); |
7397 | if (!res) { |
7398 | if (has_acpi_companion(dev: &pdev->dev)) |
7399 | dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n" ); |
7400 | else |
7401 | dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n" ); |
7402 | return 0; |
7403 | } |
7404 | |
7405 | base = devm_ioremap_resource(dev: &pdev->dev, res); |
7406 | if (IS_ERR(ptr: base)) |
7407 | return PTR_ERR(ptr: base); |
7408 | |
7409 | priv->cm3_base = base; |
7410 | return 0; |
7411 | } |
7412 | |
7413 | static int mvpp2_probe(struct platform_device *pdev) |
7414 | { |
7415 | struct fwnode_handle *fwnode = pdev->dev.fwnode; |
7416 | struct fwnode_handle *port_fwnode; |
7417 | struct mvpp2 *priv; |
7418 | struct resource *res; |
7419 | void __iomem *base; |
7420 | int i, shared; |
7421 | int err; |
7422 | |
7423 | priv = devm_kzalloc(dev: &pdev->dev, size: sizeof(*priv), GFP_KERNEL); |
7424 | if (!priv) |
7425 | return -ENOMEM; |
7426 | |
7427 | priv->hw_version = (unsigned long)device_get_match_data(dev: &pdev->dev); |
7428 | |
7429 | /* Multi queue mode isn't supported on PPv2.1; fall back to single |
7430 | * mode |
7431 | */ |
7432 | if (priv->hw_version == MVPP21) |
7433 | queue_mode = MVPP2_QDIST_SINGLE_MODE; |
7434 | |
7435 | base = devm_platform_ioremap_resource(pdev, index: 0); |
7436 | if (IS_ERR(ptr: base)) |
7437 | return PTR_ERR(ptr: base); |
7438 | |
7439 | if (priv->hw_version == MVPP21) { |
7440 | priv->lms_base = devm_platform_ioremap_resource(pdev, index: 1); |
7441 | if (IS_ERR(ptr: priv->lms_base)) |
7442 | return PTR_ERR(ptr: priv->lms_base); |
7443 | } else { |
7444 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
7445 | if (!res) { |
7446 | dev_err(&pdev->dev, "Invalid resource\n" ); |
7447 | return -EINVAL; |
7448 | } |
7449 | if (has_acpi_companion(dev: &pdev->dev)) { |
7450 | /* If the MDIO memory region is declared in |
7451 | * the ACPI tables, it may already appear as 'in use' |
7452 | * in the OS. Because it is overlapped by the second |
7453 | * region of the network controller, make |
7454 | * sure it is released before requesting it again. |
7455 | * The mvpp2 driver takes care to avoid |
7456 | * concurrent access to this memory region. |
7457 | */ |
7458 | release_resource(new: res); |
7459 | } |
7460 | priv->iface_base = devm_ioremap_resource(dev: &pdev->dev, res); |
7461 | if (IS_ERR(ptr: priv->iface_base)) |
7462 | return PTR_ERR(ptr: priv->iface_base); |
7463 | |
7464 | /* Map CM3 SRAM */ |
7465 | err = mvpp2_get_sram(pdev, priv); |
7466 | if (err) |
7467 | dev_warn(&pdev->dev, "Failed to alloc CM3 SRAM\n" ); |
7468 | |
7469 | /* Enable global Flow Control only if the SRAM handle is not NULL */ |
7470 | if (priv->cm3_base) |
7471 | priv->global_tx_fc = true; |
7472 | } |
7473 | |
7474 | if (priv->hw_version >= MVPP22 && dev_of_node(dev: &pdev->dev)) { |
7475 | priv->sysctrl_base = |
7476 | syscon_regmap_lookup_by_phandle(np: pdev->dev.of_node, |
7477 | property: "marvell,system-controller" ); |
7478 | if (IS_ERR(ptr: priv->sysctrl_base)) |
7479 | /* The system controller regmap is optional for dt |
7480 | * compatibility reasons. When not provided, the |
7481 | * configuration of the GoP relies on the |
7482 | * firmware/bootloader. |
7483 | */ |
7484 | priv->sysctrl_base = NULL; |
7485 | } |
7486 | |
7487 | if (priv->hw_version >= MVPP22 && |
7488 | mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) |
7489 | priv->percpu_pools = 1; |
7490 | |
7491 | mvpp2_setup_bm_pool(); |
7492 | |
7493 | |
7494 | priv->nthreads = min_t(unsigned int, num_present_cpus(), |
7495 | MVPP2_MAX_THREADS); |
7496 | |
7497 | shared = num_present_cpus() - priv->nthreads; |
7498 | if (shared > 0) |
7499 | bitmap_set(map: &priv->lock_map, start: 0, |
7500 | min_t(int, shared, MVPP2_MAX_THREADS)); |
7501 | |
7502 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
7503 | u32 addr_space_sz; |
7504 | |
7505 | addr_space_sz = (priv->hw_version == MVPP21 ? |
7506 | MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); |
7507 | priv->swth_base[i] = base + i * addr_space_sz; |
7508 | } |
7509 | |
7510 | if (priv->hw_version == MVPP21) |
7511 | priv->max_port_rxqs = 8; |
7512 | else |
7513 | priv->max_port_rxqs = 32; |
7514 | |
7515 | if (dev_of_node(dev: &pdev->dev)) { |
7516 | priv->pp_clk = devm_clk_get(dev: &pdev->dev, id: "pp_clk" ); |
7517 | if (IS_ERR(ptr: priv->pp_clk)) |
7518 | return PTR_ERR(ptr: priv->pp_clk); |
7519 | err = clk_prepare_enable(clk: priv->pp_clk); |
7520 | if (err < 0) |
7521 | return err; |
7522 | |
7523 | priv->gop_clk = devm_clk_get(dev: &pdev->dev, id: "gop_clk" ); |
7524 | if (IS_ERR(ptr: priv->gop_clk)) { |
7525 | err = PTR_ERR(ptr: priv->gop_clk); |
7526 | goto err_pp_clk; |
7527 | } |
7528 | err = clk_prepare_enable(clk: priv->gop_clk); |
7529 | if (err < 0) |
7530 | goto err_pp_clk; |
7531 | |
7532 | if (priv->hw_version >= MVPP22) { |
7533 | priv->mg_clk = devm_clk_get(dev: &pdev->dev, id: "mg_clk" ); |
7534 | if (IS_ERR(ptr: priv->mg_clk)) { |
7535 | err = PTR_ERR(ptr: priv->mg_clk); |
7536 | goto err_gop_clk; |
7537 | } |
7538 | |
7539 | err = clk_prepare_enable(clk: priv->mg_clk); |
7540 | if (err < 0) |
7541 | goto err_gop_clk; |
7542 | |
7543 | priv->mg_core_clk = devm_clk_get_optional(dev: &pdev->dev, id: "mg_core_clk" ); |
7544 | if (IS_ERR(ptr: priv->mg_core_clk)) { |
7545 | err = PTR_ERR(ptr: priv->mg_core_clk); |
7546 | goto err_mg_clk; |
7547 | } |
7548 | |
7549 | err = clk_prepare_enable(clk: priv->mg_core_clk); |
7550 | if (err < 0) |
7551 | goto err_mg_clk; |
7552 | } |
7553 | |
7554 | priv->axi_clk = devm_clk_get_optional(dev: &pdev->dev, id: "axi_clk" ); |
7555 | if (IS_ERR(ptr: priv->axi_clk)) { |
7556 | err = PTR_ERR(ptr: priv->axi_clk); |
7557 | goto err_mg_core_clk; |
7558 | } |
7559 | |
7560 | err = clk_prepare_enable(clk: priv->axi_clk); |
7561 | if (err < 0) |
7562 | goto err_mg_core_clk; |
7563 | |
7564 | /* Get system's tclk rate */ |
7565 | priv->tclk = clk_get_rate(clk: priv->pp_clk); |
7566 | } else { |
7567 | err = device_property_read_u32(dev: &pdev->dev, propname: "clock-frequency" , val: &priv->tclk); |
7568 | if (err) { |
7569 | dev_err(&pdev->dev, "missing clock-frequency value\n" ); |
7570 | return err; |
7571 | } |
7572 | } |
7573 | |
7574 | if (priv->hw_version >= MVPP22) { |
7575 | err = dma_set_mask(dev: &pdev->dev, MVPP2_DESC_DMA_MASK); |
7576 | if (err) |
7577 | goto err_axi_clk; |
7578 | /* Sadly, the BM pools all share the same register to |
7579 | * store the high 32 bits of their address. So they |
7580 | * must all have the same high 32 bits, which forces |
7581 | * us to restrict coherent memory to DMA_BIT_MASK(32). |
7582 | */ |
7583 | err = dma_set_coherent_mask(dev: &pdev->dev, DMA_BIT_MASK(32)); |
7584 | if (err) |
7585 | goto err_axi_clk; |
7586 | } |
7587 | |
7588 | /* Map DTS-active ports. Must be done before mvpp2_init(), which sizes the FIFOs */ |
7589 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { |
7590 | if (!fwnode_property_read_u32(fwnode: port_fwnode, propname: "port-id" , val: &i)) |
7591 | priv->port_map |= BIT(i); |
7592 | } |
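/* e.g. a (hypothetical) board describing ports 0 and 2 ends up with
 * port_map = BIT(0) | BIT(2) = 0b101.
 */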
7593 | |
7594 | if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) |
7595 | priv->hw_version = MVPP23; |
7596 | |
7597 | /* Init mss lock */ |
7598 | spin_lock_init(&priv->mss_spinlock); |
7599 | |
7600 | /* Initialize network controller */ |
7601 | err = mvpp2_init(pdev, priv); |
7602 | if (err < 0) { |
7603 | dev_err(&pdev->dev, "failed to initialize controller\n" ); |
7604 | goto err_axi_clk; |
7605 | } |
7606 | |
7607 | err = mvpp22_tai_probe(dev: &pdev->dev, priv); |
7608 | if (err < 0) |
7609 | goto err_axi_clk; |
7610 | |
7611 | /* Initialize ports */ |
7612 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { |
7613 | err = mvpp2_port_probe(pdev, port_fwnode, priv); |
7614 | if (err < 0) |
7615 | goto err_port_probe; |
7616 | } |
7617 | |
7618 | if (priv->port_count == 0) { |
7619 | dev_err(&pdev->dev, "no ports enabled\n" ); |
7620 | err = -ENODEV; |
7621 | goto err_axi_clk; |
7622 | } |
7623 | |
7624 | /* Statistics must be gathered regularly because some of them (like |
7625 | * packets counters) are 32-bit registers and could overflow quite |
7626 | * quickly. For instance, a 10Gb link used at full bandwidth with the |
7627 | * smallest packets (64B) will overflow a 32-bit counter in less than |
7628 | * 30 seconds. Then, use a workqueue to fill 64-bit counters. |
7629 | */ |
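/* Back-of-the-envelope arithmetic (standard 10GbE framing assumed):
 * a minimum-size frame occupies 64B + 8B preamble + 12B IFG = 84B =
 * 672 bits on the wire, so 10Gb/s yields ~14.88 Mpps; an octet counter
 * then advances by ~14.88e6 * 64 ~= 952MB/s and wraps 2^32 in roughly
 * 4.5 seconds.
 */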
7630 | snprintf(buf: priv->queue_name, size: sizeof(priv->queue_name), |
7631 | fmt: "stats-wq-%s%s" , netdev_name(dev: priv->port_list[0]->dev), |
7632 | priv->port_count > 1 ? "+" : "" ); |
7633 | priv->stats_queue = create_singlethread_workqueue(priv->queue_name); |
7634 | if (!priv->stats_queue) { |
7635 | err = -ENOMEM; |
7636 | goto err_port_probe; |
7637 | } |
7638 | |
7639 | if (priv->global_tx_fc && priv->hw_version >= MVPP22) { |
7640 | err = mvpp2_enable_global_fc(priv); |
7641 | if (err) |
7642 | dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n" ); |
7643 | } |
7644 | |
7645 | mvpp2_dbgfs_init(priv, name: pdev->name); |
7646 | |
7647 | platform_set_drvdata(pdev, data: priv); |
7648 | return 0; |
7649 | |
7650 | err_port_probe: |
7651 | fwnode_handle_put(fwnode: port_fwnode); |
7652 | |
7653 | i = 0; |
7654 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { |
7655 | if (priv->port_list[i]) |
7656 | mvpp2_port_remove(port: priv->port_list[i]); |
7657 | i++; |
7658 | } |
7659 | err_axi_clk: |
7660 | clk_disable_unprepare(clk: priv->axi_clk); |
7661 | err_mg_core_clk: |
7662 | clk_disable_unprepare(clk: priv->mg_core_clk); |
7663 | err_mg_clk: |
7664 | clk_disable_unprepare(clk: priv->mg_clk); |
7665 | err_gop_clk: |
7666 | clk_disable_unprepare(clk: priv->gop_clk); |
7667 | err_pp_clk: |
7668 | clk_disable_unprepare(clk: priv->pp_clk); |
7669 | return err; |
7670 | } |
7671 | |
7672 | static void mvpp2_remove(struct platform_device *pdev) |
7673 | { |
7674 | struct mvpp2 *priv = platform_get_drvdata(pdev); |
7675 | struct fwnode_handle *fwnode = pdev->dev.fwnode; |
7676 | int i = 0, poolnum = MVPP2_BM_POOLS_NUM; |
7677 | struct fwnode_handle *port_fwnode; |
7678 | |
7679 | mvpp2_dbgfs_cleanup(priv); |
7680 | |
7681 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { |
7682 | if (priv->port_list[i]) { |
7683 | mutex_destroy(lock: &priv->port_list[i]->gather_stats_lock); |
7684 | mvpp2_port_remove(port: priv->port_list[i]); |
7685 | } |
7686 | i++; |
7687 | } |
7688 | |
7689 | destroy_workqueue(wq: priv->stats_queue); |
7690 | |
7691 | if (priv->percpu_pools) |
7692 | poolnum = mvpp2_get_nrxqs(priv) * 2; |
7693 | |
7694 | for (i = 0; i < poolnum; i++) { |
7695 | struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; |
7696 | |
7697 | mvpp2_bm_pool_destroy(dev: &pdev->dev, priv, bm_pool); |
7698 | } |
7699 | |
7700 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
7701 | struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; |
7702 | |
7703 | dma_free_coherent(dev: &pdev->dev, |
7704 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, |
7705 | cpu_addr: aggr_txq->descs, |
7706 | dma_handle: aggr_txq->descs_dma); |
7707 | } |
7708 | |
7709 | if (is_acpi_node(fwnode: port_fwnode)) |
7710 | return; |
7711 | |
7712 | clk_disable_unprepare(clk: priv->axi_clk); |
7713 | clk_disable_unprepare(clk: priv->mg_core_clk); |
7714 | clk_disable_unprepare(clk: priv->mg_clk); |
7715 | clk_disable_unprepare(clk: priv->pp_clk); |
7716 | clk_disable_unprepare(clk: priv->gop_clk); |
7717 | } |
7718 | |
7719 | static const struct of_device_id mvpp2_match[] = { |
7720 | { |
7721 | .compatible = "marvell,armada-375-pp2" , |
7722 | .data = (void *)MVPP21, |
7723 | }, |
7724 | { |
7725 | .compatible = "marvell,armada-7k-pp22" , |
7726 | .data = (void *)MVPP22, |
7727 | }, |
7728 | { } |
7729 | }; |
7730 | MODULE_DEVICE_TABLE(of, mvpp2_match); |
7731 | |
7732 | #ifdef CONFIG_ACPI |
7733 | static const struct acpi_device_id mvpp2_acpi_match[] = { |
7734 | { "MRVL0110" , MVPP22 }, |
7735 | { }, |
7736 | }; |
7737 | MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); |
7738 | #endif |
7739 | |
7740 | static struct platform_driver mvpp2_driver = { |
7741 | .probe = mvpp2_probe, |
7742 | .remove_new = mvpp2_remove, |
7743 | .driver = { |
7744 | .name = MVPP2_DRIVER_NAME, |
7745 | .of_match_table = mvpp2_match, |
7746 | .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), |
7747 | }, |
7748 | }; |
7749 | |
7750 | static int __init mvpp2_driver_init(void) |
7751 | { |
7752 | return platform_driver_register(&mvpp2_driver); |
7753 | } |
7754 | module_init(mvpp2_driver_init); |
7755 | |
7756 | static void __exit mvpp2_driver_exit(void) |
7757 | { |
7758 | platform_driver_unregister(&mvpp2_driver); |
7759 | mvpp2_dbgfs_exit(); |
7760 | } |
7761 | module_exit(mvpp2_driver_exit); |
7762 | |
7763 | MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com" ); |
7764 | MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>" ); |
7765 | MODULE_LICENSE("GPL v2" ); |
7766 | |