// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

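/* Free all VIs allocated to this function via MC_CMD_FREE_VIS.  -EALREADY
 * (nothing to free) is treated as success; any other error is logged and
 * returned.
 */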
int efx_mcdi_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

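/* Allocate between @min_vis and @max_vis virtual interfaces from the MC.
 * On success the base VI number and the count actually allocated are
 * returned through @vi_base and @allocated_vis when those pointers are
 * non-NULL.
 */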
int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
		       unsigned int max_vis, unsigned int *vi_base,
		       unsigned int *allocated_vis)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	if (vi_base)
		*vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	if (allocated_vis)
		*allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

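/* Allocate the DMA buffer backing a channel's event queue, sized at one
 * efx_qword_t per event queue entry.
 */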
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

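/* Initialise a channel's event queue on the MC via MC_CMD_INIT_EVQ.  The
 * queue memory is first filled with all-ones (empty events).  When @v2 is
 * set, the V2 request format is used and the firmware is asked to choose
 * the queue type (TYPE_AUTO); otherwise the V1 flags are used, including
 * cut-through as requested by @v1_cut_thru.  The flags the firmware
 * actually applied are reported in the V2 response and logged at debug
 * level.
 */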
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						   EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
	size_t entries = channel->eventq.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc, i;

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	if (v2) {
		/* Use the new generic approach to specifying event queue
		 * configuration, requesting lower latency or higher throughput.
		 * The options that actually get used appear in the output.
		 */
		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_V2_IN_FLAG_TYPE,
				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
	} else {
		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
	}

	dma_addr = channel->eventq.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);

	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
		netif_dbg(efx, drv, efx->net_dev,
			  "Channel %d using event queue flags %08x\n",
			  channel->channel,
			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

	return rc;
}

void efx_mcdi_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq);
}

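/* Tear down a channel's event queue via MC_CMD_FINI_EVQ.  -EALREADY (the
 * queue is already gone, e.g. after an MC reboot) is ignored; other errors
 * are logged.
 */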
void efx_mcdi_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
			       outbuf, outlen, rc);
}

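/* Initialise a TX queue on the MC via MC_CMD_INIT_TXQ, setting the checksum
 * offload, timestamping and TSO flags from the queue type.  If the firmware
 * reports -ENOSPC for a TSOv2 queue (short on TSO contexts), the request is
 * retried with TSO disabled so the queue can still be brought up.
 */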
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
	size_t entries = tx_queue->txd.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	dma_addr_t dma_addr;
	size_t inlen;
	int rc, i;

	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);

	dma_addr = tx_queue->txd.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	do {
		bool tso_v2 = tx_queue->tso_version == 2;

		/* TSOv2 implies IP header checksum offload for TSO frames,
		 * so we can safely disable IP header checksum offload for
		 * everything else.  If we don't have TSOv2, then we have to
		 * enable IP header checksum offload, which is strictly
		 * incorrect but better than breaking TSO.
		 */
		MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
				/* This flag was removed from mcdi_pcol.h for
				 * the non-_EXT version of INIT_TXQ.  However,
				 * firmware still honours it.
				 */
				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
				INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
				INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);

		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
					NULL, 0, NULL);
		if (rc == -ENOSPC && tso_v2) {
			/* Retry without TSOv2 if we're short on contexts. */
			tx_queue->tso_version = 0;
			netif_warn(efx, probe, efx->net_dev,
				   "TSOv2 context not available to segment in "
				   "hardware. TCP performance may be reduced.\n");
		} else if (rc) {
			efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
					       MC_CMD_INIT_TXQ_EXT_IN_LEN,
					       NULL, 0, rc);
			goto fail;
		}
	} while (rc);

	return 0;

fail:
	return rc;
}

void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd);
}

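/* Tear down a TX queue via MC_CMD_FINI_TXQ.  As with the event queue
 * teardown, -EALREADY is ignored and other errors are logged.
 */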
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}

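/* Allocate the DMA buffer backing an RX queue's descriptor ring, sized at
 * one efx_qword_t per descriptor.
 */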
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

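/* Initialise an RX queue on the MC via MC_CMD_INIT_RXQ, requesting the RX
 * prefix and timestamping.  On EF100 the per-buffer size is taken from
 * efx->rx_page_buf_step and passed in the V4 request; on earlier NICs it
 * is left as zero.  Failure is reported with netdev_WARN since callers do
 * not check a return value.
 */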
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.len / EFX_BUF_SIZE;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
	struct efx_nic *efx = rx_queue->efx;
	unsigned int buffer_size;
	dma_addr_t dma_addr;
	int rc;
	int i;

	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	if (efx->type->revision == EFX_REV_EF100)
		buffer_size = efx->rx_page_buf_step;
	else
		buffer_size = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1,
			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);

	dma_addr = rx_queue->rxd.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
			    efx_rx_queue_index(rx_queue));
}

void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd);
}

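/* Tear down an RX queue via MC_CMD_FINI_RXQ, again treating -EALREADY as
 * success and logging any other error.
 */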
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
			       outbuf, outlen, rc);
}

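/* Tear down all RX and TX queues.  If the MC has just rebooted the queues
 * are already gone, so only efx->active_queues is reset.  Otherwise each
 * queue is finalised and we wait on efx->flush_wq, up to EFX_MAX_FLUSH_TIME,
 * for active_queues to drop to zero, returning -ETIMEDOUT if it does not.
 */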
int efx_fini_dmaq(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (efx->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_mcdi_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_mcdi_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

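/* Convert the VI window mode reported by MC_CMD_GET_CAPABILITIES into a
 * VI stride in bytes, storing it in efx->vi_stride.  Unknown modes are
 * rejected with -EIO.
 */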
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
	switch (vi_window_mode) {
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
		efx->vi_stride = 8192;
		break;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
		efx->vi_stride = 16384;
		break;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
		efx->vi_stride = 65536;
		break;
	default:
		netif_err(efx, probe, efx->net_dev,
			  "Unrecognised VI window mode %d\n",
			  vi_window_mode);
		return -EIO;
	}
	netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
		  efx->vi_stride);
	return 0;
}

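/* Query the PF number of this function via MC_CMD_GET_FUNCTION_INFO and
 * return it through @pf_index.
 */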
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	*pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}