// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Packet receive logic for Mellanox Gigabit Ethernet driver
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 dmac)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Write destination MAC to specified MAC RX filter */
	writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
	       (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));

	/* Enable MAC receive filter mask for specified index */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
	writeq(control, base + MLXBF_GIGE_CONTROL);
}

void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 *dmac)
{
	void __iomem *base = priv->base;

	/* Read destination MAC from specified MAC RX filter */
	*dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
		      (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}

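/* Promiscuous RX is implemented with the MAC_ID_RANGE matcher: accepting
 * every destination MAC from 00:00:00:00:00:00 through ff:ff:ff:ff:ff:ff
 * lets all frames pass the RX MAC filter.
 */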
void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 control;
	u64 end_mac;

	/* Enable MAC_ID_RANGE match functionality */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
	writeq(control, base + MLXBF_GIGE_CONTROL);

	/* Set start of destination MAC range check to 0 */
	writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);

	/* Set end of destination MAC range check to all FFs */
	end_mac = BCAST_MAC_ADDR;
	writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
}

void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Disable MAC_ID_RANGE match functionality */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
	writeq(control, base + MLXBF_GIGE_CONTROL);

	/* NOTE: no need to change DMAC_RANGE_START or END;
	 * those values are ignored since MAC_ID_RANGE_EN=0
	 */
}

/* Receive Initialization
 * 1) Configures RX MAC filters via MMIO registers
 * 2) Allocates RX WQE array using coherent DMA mapping
 * 3) Initializes each element of RX WQE array with the DMA-mapped
 *    address of a freshly allocated receive buffer
 * 4) Allocates RX CQE array using coherent DMA mapping
 * 5) Completes other misc receive initialization
 */
int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
{
	size_t wq_size, cq_size;
	dma_addr_t *rx_wqe_ptr;
	dma_addr_t rx_buf_dma;
	u64 data;
	int i, j;

	/* Configure MAC RX filter #0 to allow RX of broadcast pkts */
	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
				     BCAST_MAC_ADDR);

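	/* Allocate the RX work queue: an array of DMA addresses, one per
	 * queue entry, each pointing at a receive buffer.
	 */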
	wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
	priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
					       &priv->rx_wqe_base_dma,
					       GFP_KERNEL);
	if (!priv->rx_wqe_base)
		return -ENOMEM;

	/* Initialize 'rx_wqe_ptr' to point to first RX WQE in array
	 * Each RX WQE is simply a receive buffer pointer, so walk
	 * the entire array, allocating a 2KB buffer for each element
	 */
	rx_wqe_ptr = priv->rx_wqe_base;

	for (i = 0; i < priv->rx_q_entries; i++) {
		priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
						       &rx_buf_dma, DMA_FROM_DEVICE);
		if (!priv->rx_skb[i])
			goto free_wqe_and_skb;
		*rx_wqe_ptr++ = rx_buf_dma;
	}

	/* Write RX WQE base address into MMIO reg */
	writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);

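	/* Allocate the RX completion queue; hardware writes one CQE per
	 * received packet carrying its status, length and a valid bit.
	 */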
	cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
	priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
					       &priv->rx_cqe_base_dma,
					       GFP_KERNEL);
	if (!priv->rx_cqe_base)
		goto free_wqe_and_skb;

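	/* Pre-set the valid bit in every CQE; mlxbf_gige_rx_packet() only
	 * treats a CQE as a new completion when this bit matches the
	 * driver's current valid polarity.
	 */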
	for (i = 0; i < priv->rx_q_entries; i++)
		priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;

	/* Write RX CQE base address into MMIO reg */
	writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);

	/* Write RX_WQE_PI with current number of replenished buffers */
	writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);

	/* Enable removal of CRC during RX */
	data = readq(priv->base + MLXBF_GIGE_RX);
	data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX);

	/* Enable RX MAC filter pass and discard counters */
	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);

	/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
	 * indicate readiness to receive interrupts
	 */
	data = readq(priv->base + MLXBF_GIGE_INT_MASK);
	data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
	writeq(data, priv->base + MLXBF_GIGE_INT_MASK);

	/* Enable RX DMA to write new packets to memory */
	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
	data |= MLXBF_GIGE_RX_DMA_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

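	/* Tell hardware the log2 of the RX queue depth */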
	writeq(ilog2(priv->rx_q_entries),
	       priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);

	return 0;

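	/* Error unwind: unmap and free every RX buffer allocated so far,
	 * then release the RX WQE array.
	 */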
free_wqe_and_skb:
	rx_wqe_ptr = priv->rx_wqe_base;
	for (j = 0; j < i; j++) {
		dma_unmap_single(priv->dev, *rx_wqe_ptr,
				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[j]);
		rx_wqe_ptr++;
	}
	dma_free_coherent(priv->dev, wq_size,
			  priv->rx_wqe_base, priv->rx_wqe_base_dma);
	return -ENOMEM;
}

/* Receive Deinitialization
 * This routine will free allocations done by mlxbf_gige_rx_init(),
 * namely the RX WQE and RX CQE arrays, as well as all RX buffers
 */
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
{
	dma_addr_t *rx_wqe_ptr;
	size_t size;
	u64 data;
	int i;

	/* Disable RX DMA to prevent packet transfers to memory */
	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
	data &= ~MLXBF_GIGE_RX_DMA_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

	rx_wqe_ptr = priv->rx_wqe_base;

	for (i = 0; i < priv->rx_q_entries; i++) {
		dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[i]);
		rx_wqe_ptr++;
	}

	size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->rx_wqe_base, priv->rx_wqe_base_dma);

	size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->rx_cqe_base, priv->rx_cqe_base_dma);

	priv->rx_wqe_base = NULL;
	priv->rx_wqe_base_dma = 0;
	priv->rx_cqe_base = NULL;
	priv->rx_cqe_base_dma = 0;
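	/* Clear the RX queue base registers so hardware no longer holds
	 * references to the freed rings.
	 */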
	writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
	writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
}

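/* Process one RX completion: read the CQE at the current RX WQE producer
 * index and, if it carries a new completion, pass a good packet to the
 * stack with a freshly allocated replacement buffer (or just count the
 * error), then advance RX_WQE_PI so hardware can reuse the slot.
 * Returns true if more completions appear to be pending.
 */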
static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb = NULL, *rx_skb;
	u16 rx_pi_rem, rx_ci_rem;
	dma_addr_t *rx_wqe_addr;
	dma_addr_t rx_buf_dma;
	u64 *rx_cqe_addr;
	u64 datalen;
	u64 rx_cqe;
	u16 rx_ci;
	u16 rx_pi;

	/* Index into RX buffer array is rx_pi w/wrap based on RX queue size */
	rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
	rx_pi_rem = rx_pi % priv->rx_q_entries;

	rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
	rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
	rx_cqe = *rx_cqe_addr;

	if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
		return false;

	if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
		/* Packet is OK, increment stats */
		datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += datalen;

		skb = priv->rx_skb[rx_pi_rem];

		/* Alloc another RX SKB for this same index */
		rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
					      &rx_buf_dma, DMA_FROM_DEVICE);
		if (!rx_skb)
			return false;
		priv->rx_skb[rx_pi_rem] = rx_skb;
		dma_unmap_single(priv->dev, *rx_wqe_addr,
				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);

		skb_put(skb, datalen);

		skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */

		skb->protocol = eth_type_trans(skb, netdev);

		*rx_wqe_addr = rx_buf_dma;
	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
		priv->stats.rx_mac_errors++;
	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
		priv->stats.rx_truncate_errors++;
	}

	/* Let hardware know we've replenished one buffer */
	rx_pi++;

	/* Ensure completion of all writes before notifying HW of replenish */
	wmb();
	writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);

	(*rx_pkts)++;

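	/* Toggle the expected CQE valid polarity each time the producer
	 * index wraps around the queue.
	 */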
	rx_pi_rem = rx_pi % priv->rx_q_entries;
	if (rx_pi_rem == 0)
		priv->valid_polarity ^= 1;
	rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
	rx_ci_rem = rx_ci % priv->rx_q_entries;

	if (skb)
		netif_receive_skb(skb);

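	/* More completions remain if the producer and consumer indices
	 * have not converged.
	 */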
	return rx_pi_rem != rx_ci_rem;
}

/* Driver poll() function called by NAPI infrastructure */
int mlxbf_gige_poll(struct napi_struct *napi, int budget)
{
	struct mlxbf_gige *priv;
	bool remaining_pkts;
	int work_done = 0;
	u64 data;

	priv = container_of(napi, struct mlxbf_gige, napi);

	mlxbf_gige_handle_tx_complete(priv);

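	/* Process RX completions until the queue is drained or the NAPI
	 * budget is exhausted.
	 */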
	do {
		remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
	} while (remaining_pkts && work_done < budget);

	/* If work_done is below budget, all pending packets were handled:
	 * stop NAPI polling via napi_complete_done(napi, work_done) and
	 * re-enable the RX interrupt.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
		 * indicate receive readiness
		 */
		data = readq(priv->base + MLXBF_GIGE_INT_MASK);
		data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
		writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
	}

	return work_done;
}