// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
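
/* The *_ORDER values appear to encode each cache size as (8 << order):
 * 8 << 1 == 16 TX entries and 8 << 3 == 64 RX entries.  A minimal
 * compile-time check of that assumed relationship would be:
 *
 *	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
 *	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
 */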

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
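
/* Worked example of the encoding above, for illustration: a TEST event
 * on channel 2 gives magic = (0x000101 << 8) | 2 = 0x00010102, and
 * _EFX_CHANNEL_MAGIC_CODE(0x00010102) recovers 0x000101.  The low byte
 * therefore limits the encodable channel/queue number to 255.
 */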

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
	       ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote " EFX_OWORD_FMT " read " EFX_OWORD_FMT
		  " at address 0x%x mask " EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
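
/* Note on the descriptor layout above: FRF_AZ_BUF_ADR_FBUF holds a
 * 4 KiB page number, hence the dma_addr >> 12.  This relies on each
 * EFX_BUF_SIZE chunk being 4 KiB-aligned, which (assuming EFX_BUF_SIZE
 * is 4096) efx_alloc_special_buffer() below asserts via its BUG_ON().
 */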

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SIENA_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SIENA_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_siena_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_pending = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	return len;
}
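
/* Worked example for the boundary arithmetic above, assuming
 * EFX_PAGE_SIZE is 4096: for dma_addr == 0x1FF0 the limit is
 * (~0x1FF0 & 0xFFF) + 1 = 0xF + 1 = 16, so a 64-byte fragment is
 * clipped to the 16 bytes left before the 4K boundary and the caller
 * must emit a further descriptor for the remainder.
 */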


/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
			 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	EFX_POPULATE_OWORD_1(reg,
			     FRF_BZ_TX_PACE,
			     (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
			     FFE_BZ_TX_PACE_OFF :
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);

	tx_queue->tso_version = 1;
}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool jumbo_en;

	/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
	jumbo_en = efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, true,
			      FRF_AZ_RX_ISCSI_HDIG_EN, true,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_siena_sriov_enabled(efx)) {
			rc = efx_siena_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to efx_siena_realloc_channels() we
 * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
 * of 4 for batched flush requests; and the efx->active_queues gets messed up
 * because we keep incrementing for the newly initialised queues, but it never
 * went to zero previously. Then we get a timeout every time we try to restart
 * the queues, as it doesn't go back to zero when we should be flushing the
 * queues.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
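
/* For example, with the 16-byte step noted above, channel 3's RPTR
 * register lands at evq_rptr_tbl_base + 3 * 16 = base + 48.
 */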

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = channel->tx_queue +
				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
		efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = channel->tx_queue +
				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT "\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;

	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#else
	(void) rx_ev_other_err;
#endif

	if (efx->net_dev->features & NETIF_F_RXALL)
		/* don't discard frame for CRC error */
		rx_ev_eth_crc_err = false;

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_siena_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_siena_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			fallthrough;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			fallthrough;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_siena_rx_packet(rx_queue,
			    rx_queue->removed_count & rx_queue->ptr_mask,
			    rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
		channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}
}
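
/* Worked example of the qid decode above: assuming
 * EFX_MAX_TXQ_PER_CHANNEL is 4, a flush-done event carrying qid == 6
 * resolves to TX channel 6 / 4 == 1 and queue 6 % 4 == 2 within that
 * channel.
 */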

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_siena_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event " EFX_QWORD_FMT "\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SIENA_SRIOV
		efx_siena_sriov_tx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SIENA_SRIOV
		efx_siena_sriov_rx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SIENA_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SIENA_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			efx_farch_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
#ifdef CONFIG_SFC_SIENA_SRIOV
		case FSE_CZ_EV_CODE_USER_EV:
			efx_siena_sriov_event(channel, &event);
			break;
#endif
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_siena_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			fallthrough;
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}
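
/* The FRF_AZ_EVQ_SIZE field above (and the TX/RX DESCQ_SIZE fields
 * earlier in this file) appears to carry the log2 of the entry count;
 * since the counts are powers of two, __ffs() computes it, e.g.
 * __ffs(512) == 9.
 */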

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
	return 0;
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error " EFX_OWORD_FMT "\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
1510 | |
1511 | /* Handle a legacy interrupt |
1512 | * Acknowledges the interrupt and schedule event queue processing. |
1513 | */ |
1514 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) |
1515 | { |
1516 | struct efx_nic *efx = dev_id; |
1517 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1518 | efx_oword_t *int_ker = efx->irq_status.addr; |
1519 | irqreturn_t result = IRQ_NONE; |
1520 | struct efx_channel *channel; |
1521 | efx_dword_t reg; |
1522 | u32 queues; |
1523 | int syserr; |
1524 | |
1525 | /* Read the ISR which also ACKs the interrupts */ |
1526 | efx_readd(efx, value: ®, FR_BZ_INT_ISR0); |
1527 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); |
1528 | |
1529 | /* Legacy interrupts are disabled too late by the EEH kernel |
1530 | * code. Disable them earlier. |
1531 | * If an EEH error occurred, the read will have returned all ones. |
1532 | */ |
1533 | if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) && |
1534 | !efx->eeh_disabled_legacy_irq) { |
		disable_irq_nosync(efx->legacy_irq);
1536 | efx->eeh_disabled_legacy_irq = true; |
1537 | } |
1538 | |
1539 | /* Handle non-event-queue sources */ |
1540 | if (queues & (1U << efx->irq_level) && soft_enabled) { |
1541 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1542 | if (unlikely(syserr)) |
1543 | return efx_farch_fatal_interrupt(efx); |
1544 | efx->last_irq_cpu = raw_smp_processor_id(); |
1545 | } |
1546 | |
1547 | if (queues != 0) { |
1548 | efx->irq_zero_count = 0; |
1549 | |
1550 | /* Schedule processing of any interrupting queues */ |
1551 | if (likely(soft_enabled)) { |
1552 | efx_for_each_channel(channel, efx) { |
1553 | if (queues & 1) |
1554 | efx_schedule_channel_irq(channel); |
1555 | queues >>= 1; |
1556 | } |
1557 | } |
1558 | result = IRQ_HANDLED; |
1559 | |
1560 | } else { |
1561 | efx_qword_t *event; |
1562 | |
1563 | /* Legacy ISR read can return zero once (SF bug 15783) */ |
1564 | |
1565 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 |
1566 | * because this might be a shared interrupt. */ |
1567 | if (efx->irq_zero_count++ == 0) |
1568 | result = IRQ_HANDLED; |
1569 | |
1570 | /* Ensure we schedule or rearm all event queues */ |
1571 | if (likely(soft_enabled)) { |
1572 | efx_for_each_channel(channel, efx) { |
				event = efx_event(channel,
						  channel->eventq_read_ptr);
1575 | if (efx_event_present(event)) |
1576 | efx_schedule_channel_irq(channel); |
1577 | else |
1578 | efx_farch_ev_read_ack(channel); |
1579 | } |
1580 | } |
1581 | } |
1582 | |
1583 | if (result == IRQ_HANDLED) |
1584 | netif_vdbg(efx, intr, efx->net_dev, |
1585 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n" , |
1586 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
1587 | |
1588 | return result; |
1589 | } |
1590 | |
1591 | /* Handle an MSI interrupt |
1592 | * |
1593 | * Handle an MSI hardware interrupt. This routine schedules event |
1594 | * queue processing. No interrupt acknowledgement cycle is necessary. |
1595 | * Also, we never need to check that the interrupt is for us, since |
1596 | * MSI interrupts cannot be shared. |
1597 | */ |
1598 | irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) |
1599 | { |
1600 | struct efx_msi_context *context = dev_id; |
1601 | struct efx_nic *efx = context->efx; |
1602 | efx_oword_t *int_ker = efx->irq_status.addr; |
1603 | int syserr; |
1604 | |
1605 | netif_vdbg(efx, intr, efx->net_dev, |
1606 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n" , |
1607 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1608 | |
1609 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1610 | return IRQ_HANDLED; |
1611 | |
1612 | /* Handle non-event-queue sources */ |
1613 | if (context->index == efx->irq_level) { |
1614 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1615 | if (unlikely(syserr)) |
1616 | return efx_farch_fatal_interrupt(efx); |
1617 | efx->last_irq_cpu = raw_smp_processor_id(); |
1618 | } |
1619 | |
1620 | /* Schedule processing of the channel */ |
	efx_schedule_channel_irq(efx->channel[context->index]);
1622 | |
1623 | return IRQ_HANDLED; |
1624 | } |
1625 | |
/* Set up the RSS indirection table.
 * This maps the packet's hash value to an RX queue.
 */
1629 | void efx_farch_rx_push_indir_table(struct efx_nic *efx) |
1630 | { |
1631 | size_t i = 0; |
1632 | efx_dword_t dword; |
1633 | |
1634 | BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != |
1635 | FR_BZ_RX_INDIRECTION_TBL_ROWS); |
1636 | |
1637 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { |
1638 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, |
1639 | efx->rss_context.rx_indir_table[i]); |
		efx_writed(efx, &dword,
1641 | FR_BZ_RX_INDIRECTION_TBL + |
1642 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); |
1643 | } |
1644 | } |
1645 | |
1646 | void efx_farch_rx_pull_indir_table(struct efx_nic *efx) |
1647 | { |
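	/* Inverse of efx_farch_rx_push_indir_table(): read the RSS
	 * indirection table back from the hardware.
	 */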
1648 | size_t i = 0; |
1649 | efx_dword_t dword; |
1650 | |
1651 | BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != |
1652 | FR_BZ_RX_INDIRECTION_TBL_ROWS); |
1653 | |
1654 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { |
		efx_readd(efx, &dword,
1656 | FR_BZ_RX_INDIRECTION_TBL + |
1657 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); |
1658 | efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); |
1659 | } |
1660 | } |
1661 | |
1662 | /* Looks at available SRAM resources and works out how many queues we |
1663 | * can support, and where things like descriptor caches should live. |
1664 | * |
1665 | * SRAM is split up as follows: |
1666 | * 0 buftbl entries for channels |
1667 | * efx->vf_buftbl_base buftbl entries for SR-IOV |
1668 | * efx->rx_dc_base RX descriptor caches |
1669 | * efx->tx_dc_base TX descriptor caches |
1670 | */ |
1671 | void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) |
1672 | { |
1673 | unsigned vi_count, total_tx_channels; |
1674 | #ifdef CONFIG_SFC_SIENA_SRIOV |
1675 | struct siena_nic_data *nic_data; |
1676 | unsigned buftbl_min; |
1677 | #endif |
1678 | |
1679 | total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; |
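	/* One VI is needed per event queue and per potential TX queue;
	 * the descriptor caches carved out of SRAM below are sized per VI.
	 */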
1680 | vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL); |
1681 | |
1682 | #ifdef CONFIG_SFC_SIENA_SRIOV |
1683 | nic_data = efx->nic_data; |
1684 | /* Account for the buffer table entries backing the datapath channels |
1685 | * and the descriptor caches for those channels. |
1686 | */ |
1687 | buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + |
1688 | total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE + |
1689 | efx->n_channels * EFX_MAX_EVQ_SIZE) |
1690 | * sizeof(efx_qword_t) / EFX_BUF_SIZE); |
1691 | if (efx->type->sriov_wanted) { |
1692 | if (efx->type->sriov_wanted(efx)) { |
1693 | unsigned vi_dc_entries, buftbl_free; |
1694 | unsigned entries_per_vf, vf_limit; |
1695 | |
1696 | nic_data->vf_buftbl_base = buftbl_min; |
1697 | |
1698 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; |
1699 | vi_count = max(vi_count, EFX_VI_BASE); |
1700 | buftbl_free = (sram_lim_qw - buftbl_min - |
1701 | vi_count * vi_dc_entries); |
1702 | |
1703 | entries_per_vf = ((vi_dc_entries + |
1704 | EFX_VF_BUFTBL_PER_VI) * |
1705 | efx_vf_size(efx)); |
1706 | vf_limit = min(buftbl_free / entries_per_vf, |
1707 | (1024U - EFX_VI_BASE) >> efx->vi_scale); |
1708 | |
1709 | if (efx->vf_count > vf_limit) { |
1710 | netif_err(efx, probe, efx->net_dev, |
1711 | "Reducing VF count from from %d to %d\n" , |
1712 | efx->vf_count, vf_limit); |
1713 | efx->vf_count = vf_limit; |
1714 | } |
1715 | vi_count += efx->vf_count * efx_vf_size(efx); |
1716 | } |
1717 | } |
1718 | #endif |
1719 | |
1720 | efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; |
1721 | efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; |
1722 | } |
1723 | |
1724 | u32 efx_farch_fpga_ver(struct efx_nic *efx) |
1725 | { |
1726 | efx_oword_t altera_build; |
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1728 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); |
1729 | } |
1730 | |
1731 | void efx_farch_init_common(struct efx_nic *efx) |
1732 | { |
1733 | efx_oword_t temp; |
1734 | |
1735 | /* Set positions of descriptor caches in SRAM. */ |
1736 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); |
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1738 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); |
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1740 | |
1741 | /* Set TX descriptor cache size. */ |
1742 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); |
1743 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); |
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1745 | |
1746 | /* Set RX descriptor cache size. Set low watermark to size-8, as |
1747 | * this allows most efficient prefetching. |
1748 | */ |
1749 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); |
1750 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); |
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1752 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); |
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1754 | |
1755 | /* Program INT_KER address */ |
1756 | EFX_POPULATE_OWORD_2(temp, |
1757 | FRF_AZ_NORM_INT_VEC_DIS_KER, |
1758 | EFX_INT_MODE_USE_MSI(efx), |
1759 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); |
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1761 | |
1762 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) |
1763 | /* Use an interrupt level unused by event queues */ |
1764 | efx->irq_level = 0x1f; |
1765 | else |
1766 | /* Use a valid MSI-X vector */ |
1767 | efx->irq_level = 0; |
1768 | |
	/* Enable all the genuinely fatal interrupts. (They are still
	 * masked by the overall interrupt mask, controlled by
	 * efx_farch_interrupts()).
1772 | * |
1773 | * Note: All other fatal interrupts are enabled |
1774 | */ |
1775 | EFX_POPULATE_OWORD_3(temp, |
1776 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, |
1777 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, |
1778 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); |
1779 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); |
1780 | EFX_INVERT_OWORD(temp); |
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1782 | |
1783 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be |
1784 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. |
1785 | */ |
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1787 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); |
1788 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); |
1789 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); |
1790 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); |
1791 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); |
1792 | /* Enable SW_EV to inherit in char driver - assume harmless here */ |
1793 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
1794 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
1795 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); |
1796 | /* Disable hardware watchdog which can misfire */ |
1797 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); |
1798 | /* Squash TX of packets of 16 bytes or less */ |
1799 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1801 | |
1802 | EFX_POPULATE_OWORD_4(temp, |
1803 | /* Default values */ |
1804 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, |
1805 | FRF_BZ_TX_PACE_SB_AF, 0xb, |
1806 | FRF_BZ_TX_PACE_FB_BASE, 0, |
1807 | /* Allow large pace values in the fast bin. */ |
1808 | FRF_BZ_TX_PACE_BIN_TH, |
1809 | FFE_BZ_TX_PACE_RESERVED); |
	efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1811 | } |
1812 | |
1813 | /************************************************************************** |
1814 | * |
1815 | * Filter tables |
1816 | * |
1817 | ************************************************************************** |
1818 | */ |
1819 | |
1820 | /* "Fudge factors" - difference between programmed value and actual depth. |
1821 | * Due to pipelined implementation we need to program H/W with a value that |
1822 | * is larger than the hop limit we want. |
1823 | */ |
1824 | #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 |
1825 | #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 |
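/* Example: if the deepest TCP full-match filter sits at search depth 5,
 * the hardware limit is programmed as
 * 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL = 6; see
 * efx_farch_filter_push_rx_config().
 */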
1826 | |
1827 | /* Hard maximum search limit. Hardware will time-out beyond 200-something. |
1828 | * We also need to avoid infinite loops in efx_farch_filter_search() when the |
1829 | * table is full. |
1830 | */ |
1831 | #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 |
1832 | |
1833 | /* Don't try very hard to find space for performance hints, as this is |
1834 | * counter-productive. */ |
1835 | #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 |
1836 | |
1837 | enum efx_farch_filter_type { |
1838 | EFX_FARCH_FILTER_TCP_FULL = 0, |
1839 | EFX_FARCH_FILTER_TCP_WILD, |
1840 | EFX_FARCH_FILTER_UDP_FULL, |
1841 | EFX_FARCH_FILTER_UDP_WILD, |
1842 | EFX_FARCH_FILTER_MAC_FULL = 4, |
1843 | EFX_FARCH_FILTER_MAC_WILD, |
1844 | EFX_FARCH_FILTER_UC_DEF = 8, |
1845 | EFX_FARCH_FILTER_MC_DEF, |
1846 | EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ |
1847 | }; |
1848 | |
1849 | enum efx_farch_filter_table_id { |
1850 | EFX_FARCH_FILTER_TABLE_RX_IP = 0, |
1851 | EFX_FARCH_FILTER_TABLE_RX_MAC, |
1852 | EFX_FARCH_FILTER_TABLE_RX_DEF, |
1853 | EFX_FARCH_FILTER_TABLE_TX_MAC, |
1854 | EFX_FARCH_FILTER_TABLE_COUNT, |
1855 | }; |
1856 | |
1857 | enum efx_farch_filter_index { |
1858 | EFX_FARCH_FILTER_INDEX_UC_DEF, |
1859 | EFX_FARCH_FILTER_INDEX_MC_DEF, |
1860 | EFX_FARCH_FILTER_SIZE_RX_DEF, |
1861 | }; |
1862 | |
1863 | struct efx_farch_filter_spec { |
1864 | u8 type:4; |
1865 | u8 priority:4; |
1866 | u8 flags; |
1867 | u16 dmaq_id; |
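	/* Type-specific match values, packed exactly as
	 * efx_farch_filter_build() writes them to the hardware table.
	 */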
1868 | u32 data[3]; |
1869 | }; |
1870 | |
1871 | struct efx_farch_filter_table { |
1872 | enum efx_farch_filter_table_id id; |
1873 | u32 offset; /* address of table relative to BAR */ |
1874 | unsigned size; /* number of entries */ |
1875 | unsigned step; /* step between entries */ |
1876 | unsigned used; /* number currently used */ |
1877 | unsigned long *used_bitmap; |
1878 | struct efx_farch_filter_spec *spec; |
1879 | unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; |
1880 | }; |
1881 | |
1882 | struct efx_farch_filter_state { |
1883 | struct rw_semaphore lock; /* Protects table contents */ |
1884 | struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; |
1885 | }; |
1886 | |
1887 | static void |
1888 | efx_farch_filter_table_clear_entry(struct efx_nic *efx, |
1889 | struct efx_farch_filter_table *table, |
1890 | unsigned int filter_idx); |
1891 | |
1892 | /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit |
1893 | * key derived from the n-tuple. The initial LFSR state is 0xffff. */ |
1894 | static u16 efx_farch_filter_hash(u32 key) |
1895 | { |
1896 | u16 tmp; |
1897 | |
1898 | /* First 16 rounds */ |
1899 | tmp = 0x1fff ^ key >> 16; |
1900 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; |
1901 | tmp = tmp ^ tmp >> 9; |
1902 | /* Last 16 rounds */ |
1903 | tmp = tmp ^ tmp << 13 ^ key; |
1904 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; |
1905 | return tmp ^ tmp >> 9; |
1906 | } |
1907 | |
1908 | /* To allow for hash collisions, filter search continues at these |
1909 | * increments from the first possible entry selected by the hash. */ |
1910 | static u16 efx_farch_filter_increment(u32 key) |
1911 | { |
1912 | return key * 2 - 1; |
1913 | } |
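
/* An illustrative probe sequence using the two helpers above
 * (a sketch; see efx_farch_filter_insert() for the real loop):
 *	i = hash & (table->size - 1);
 *	i = (i + incr) & (table->size - 1);	on each collision
 * incr is always odd, so the walk can visit every slot of a
 * power-of-two table before repeating.
 */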
1914 | |
1915 | static enum efx_farch_filter_table_id |
1916 | efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) |
1917 | { |
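	/* The encoding makes this simply type >> 2: TCP_FULL(0) to
	 * UDP_WILD(3) land in RX_IP, MAC_FULL(4) and MAC_WILD(5) in
	 * RX_MAC, UC/MC_DEF(8/9) in RX_DEF; the TX flag then shifts MAC
	 * filters to TX_MAC. The BUILD_BUG_ONs below pin this layout.
	 */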
1918 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != |
1919 | (EFX_FARCH_FILTER_TCP_FULL >> 2)); |
1920 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != |
1921 | (EFX_FARCH_FILTER_TCP_WILD >> 2)); |
1922 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != |
1923 | (EFX_FARCH_FILTER_UDP_FULL >> 2)); |
1924 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != |
1925 | (EFX_FARCH_FILTER_UDP_WILD >> 2)); |
1926 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != |
1927 | (EFX_FARCH_FILTER_MAC_FULL >> 2)); |
1928 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != |
1929 | (EFX_FARCH_FILTER_MAC_WILD >> 2)); |
1930 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != |
1931 | EFX_FARCH_FILTER_TABLE_RX_MAC + 2); |
1932 | return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0); |
1933 | } |
1934 | |
1935 | static void efx_farch_filter_push_rx_config(struct efx_nic *efx) |
1936 | { |
1937 | struct efx_farch_filter_state *state = efx->filter_state; |
1938 | struct efx_farch_filter_table *table; |
1939 | efx_oword_t filter_ctl; |
1940 | |
	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1942 | |
1943 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; |
1944 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, |
1945 | table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + |
1946 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1947 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, |
1948 | table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + |
1949 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1950 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, |
1951 | table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + |
1952 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1953 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, |
1954 | table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + |
1955 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1956 | |
1957 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; |
1958 | if (table->size) { |
1959 | EFX_SET_OWORD_FIELD( |
1960 | filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, |
1961 | table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + |
1962 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1963 | EFX_SET_OWORD_FIELD( |
1964 | filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, |
1965 | table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + |
1966 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1967 | } |
1968 | |
1969 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; |
1970 | if (table->size) { |
1971 | EFX_SET_OWORD_FIELD( |
1972 | filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, |
1973 | table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); |
1974 | EFX_SET_OWORD_FIELD( |
1975 | filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, |
1976 | !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & |
1977 | EFX_FILTER_FLAG_RX_RSS)); |
1978 | EFX_SET_OWORD_FIELD( |
1979 | filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, |
1980 | table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); |
1981 | EFX_SET_OWORD_FIELD( |
1982 | filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, |
1983 | !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & |
1984 | EFX_FILTER_FLAG_RX_RSS)); |
1985 | |
1986 | /* There is a single bit to enable RX scatter for all |
1987 | * unmatched packets. Only set it if scatter is |
1988 | * enabled in both filter specs. |
1989 | */ |
1990 | EFX_SET_OWORD_FIELD( |
1991 | filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, |
1992 | !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & |
1993 | table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & |
1994 | EFX_FILTER_FLAG_RX_SCATTER)); |
1995 | } else { |
1996 | /* We don't expose 'default' filters because unmatched |
1997 | * packets always go to the queue number found in the |
1998 | * RSS table. But we still need to set the RX scatter |
1999 | * bit here. |
2000 | */ |
2001 | EFX_SET_OWORD_FIELD( |
2002 | filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, |
2003 | efx->rx_scatter); |
2004 | } |
2005 | |
	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
2007 | } |
2008 | |
2009 | static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) |
2010 | { |
2011 | struct efx_farch_filter_state *state = efx->filter_state; |
2012 | struct efx_farch_filter_table *table; |
2013 | efx_oword_t tx_cfg; |
2014 | |
	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
2016 | |
2017 | table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; |
2018 | if (table->size) { |
2019 | EFX_SET_OWORD_FIELD( |
2020 | tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, |
2021 | table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + |
2022 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
2023 | EFX_SET_OWORD_FIELD( |
2024 | tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, |
2025 | table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + |
2026 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
2027 | } |
2028 | |
	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2030 | } |
2031 | |
2032 | static int |
2033 | efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, |
2034 | const struct efx_filter_spec *gen_spec) |
2035 | { |
2036 | bool is_full = false; |
2037 | |
2038 | if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) |
2039 | return -EINVAL; |
2040 | |
2041 | spec->priority = gen_spec->priority; |
2042 | spec->flags = gen_spec->flags; |
2043 | spec->dmaq_id = gen_spec->dmaq_id; |
2044 | |
2045 | switch (gen_spec->match_flags) { |
2046 | case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | |
2047 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | |
2048 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): |
2049 | is_full = true; |
2050 | fallthrough; |
2051 | case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | |
2052 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { |
2053 | __be32 rhost, host1, host2; |
2054 | __be16 rport, port1, port2; |
2055 | |
2056 | EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); |
2057 | |
2058 | if (gen_spec->ether_type != htons(ETH_P_IP)) |
2059 | return -EPROTONOSUPPORT; |
2060 | if (gen_spec->loc_port == 0 || |
2061 | (is_full && gen_spec->rem_port == 0)) |
2062 | return -EADDRNOTAVAIL; |
2063 | switch (gen_spec->ip_proto) { |
2064 | case IPPROTO_TCP: |
2065 | spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : |
2066 | EFX_FARCH_FILTER_TCP_WILD); |
2067 | break; |
2068 | case IPPROTO_UDP: |
2069 | spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : |
2070 | EFX_FARCH_FILTER_UDP_WILD); |
2071 | break; |
2072 | default: |
2073 | return -EPROTONOSUPPORT; |
2074 | } |
2075 | |
2076 | /* Filter is constructed in terms of source and destination, |
2077 | * with the odd wrinkle that the ports are swapped in a UDP |
2078 | * wildcard filter. We need to convert from local and remote |
2079 | * (= zero for wildcard) addresses. |
2080 | */ |
2081 | rhost = is_full ? gen_spec->rem_host[0] : 0; |
2082 | rport = is_full ? gen_spec->rem_port : 0; |
2083 | host1 = rhost; |
2084 | host2 = gen_spec->loc_host[0]; |
2085 | if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { |
2086 | port1 = gen_spec->loc_port; |
2087 | port2 = rport; |
2088 | } else { |
2089 | port1 = rport; |
2090 | port2 = gen_spec->loc_port; |
2091 | } |
2092 | spec->data[0] = ntohl(host1) << 16 | ntohs(port1); |
2093 | spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; |
2094 | spec->data[2] = ntohl(host2); |
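		/* i.e. the 96-bit match value, read least- to
		 * most-significant across data[0..2], is
		 * {port1, host1, port2, host2}.
		 */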
2095 | |
2096 | break; |
2097 | } |
2098 | |
2099 | case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: |
2100 | is_full = true; |
2101 | fallthrough; |
2102 | case EFX_FILTER_MATCH_LOC_MAC: |
2103 | spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : |
2104 | EFX_FARCH_FILTER_MAC_WILD); |
2105 | spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; |
2106 | spec->data[1] = (gen_spec->loc_mac[2] << 24 | |
2107 | gen_spec->loc_mac[3] << 16 | |
2108 | gen_spec->loc_mac[4] << 8 | |
2109 | gen_spec->loc_mac[5]); |
2110 | spec->data[2] = (gen_spec->loc_mac[0] << 8 | |
2111 | gen_spec->loc_mac[1]); |
2112 | break; |
2113 | |
2114 | case EFX_FILTER_MATCH_LOC_MAC_IG: |
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2116 | EFX_FARCH_FILTER_MC_DEF : |
2117 | EFX_FARCH_FILTER_UC_DEF); |
2118 | memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ |
2119 | break; |
2120 | |
2121 | default: |
2122 | return -EPROTONOSUPPORT; |
2123 | } |
2124 | |
2125 | return 0; |
2126 | } |
2127 | |
2128 | static void |
2129 | efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, |
2130 | const struct efx_farch_filter_spec *spec) |
2131 | { |
2132 | bool is_full = false; |
2133 | |
2134 | /* *gen_spec should be completely initialised, to be consistent |
2135 | * with efx_filter_init_{rx,tx}() and in case we want to copy |
2136 | * it back to userland. |
2137 | */ |
2138 | memset(gen_spec, 0, sizeof(*gen_spec)); |
2139 | |
2140 | gen_spec->priority = spec->priority; |
2141 | gen_spec->flags = spec->flags; |
2142 | gen_spec->dmaq_id = spec->dmaq_id; |
2143 | |
2144 | switch (spec->type) { |
2145 | case EFX_FARCH_FILTER_TCP_FULL: |
2146 | case EFX_FARCH_FILTER_UDP_FULL: |
2147 | is_full = true; |
2148 | fallthrough; |
2149 | case EFX_FARCH_FILTER_TCP_WILD: |
2150 | case EFX_FARCH_FILTER_UDP_WILD: { |
2151 | __be32 host1, host2; |
2152 | __be16 port1, port2; |
2153 | |
2154 | gen_spec->match_flags = |
2155 | EFX_FILTER_MATCH_ETHER_TYPE | |
2156 | EFX_FILTER_MATCH_IP_PROTO | |
2157 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; |
2158 | if (is_full) |
2159 | gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | |
2160 | EFX_FILTER_MATCH_REM_PORT); |
2161 | gen_spec->ether_type = htons(ETH_P_IP); |
2162 | gen_spec->ip_proto = |
2163 | (spec->type == EFX_FARCH_FILTER_TCP_FULL || |
2164 | spec->type == EFX_FARCH_FILTER_TCP_WILD) ? |
2165 | IPPROTO_TCP : IPPROTO_UDP; |
2166 | |
2167 | host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); |
2168 | port1 = htons(spec->data[0]); |
2169 | host2 = htonl(spec->data[2]); |
2170 | port2 = htons(spec->data[1] >> 16); |
2171 | if (spec->flags & EFX_FILTER_FLAG_TX) { |
2172 | gen_spec->loc_host[0] = host1; |
2173 | gen_spec->rem_host[0] = host2; |
2174 | } else { |
2175 | gen_spec->loc_host[0] = host2; |
2176 | gen_spec->rem_host[0] = host1; |
2177 | } |
2178 | if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ |
2179 | (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { |
2180 | gen_spec->loc_port = port1; |
2181 | gen_spec->rem_port = port2; |
2182 | } else { |
2183 | gen_spec->loc_port = port2; |
2184 | gen_spec->rem_port = port1; |
2185 | } |
2186 | |
2187 | break; |
2188 | } |
2189 | |
2190 | case EFX_FARCH_FILTER_MAC_FULL: |
2191 | is_full = true; |
2192 | fallthrough; |
2193 | case EFX_FARCH_FILTER_MAC_WILD: |
2194 | gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; |
2195 | if (is_full) |
2196 | gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; |
2197 | gen_spec->loc_mac[0] = spec->data[2] >> 8; |
2198 | gen_spec->loc_mac[1] = spec->data[2]; |
2199 | gen_spec->loc_mac[2] = spec->data[1] >> 24; |
2200 | gen_spec->loc_mac[3] = spec->data[1] >> 16; |
2201 | gen_spec->loc_mac[4] = spec->data[1] >> 8; |
2202 | gen_spec->loc_mac[5] = spec->data[1]; |
2203 | gen_spec->outer_vid = htons(spec->data[0]); |
2204 | break; |
2205 | |
2206 | case EFX_FARCH_FILTER_UC_DEF: |
2207 | case EFX_FARCH_FILTER_MC_DEF: |
2208 | gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; |
2209 | gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; |
2210 | break; |
2211 | |
2212 | default: |
2213 | WARN_ON(1); |
2214 | break; |
2215 | } |
2216 | } |
2217 | |
2218 | static void |
2219 | efx_farch_filter_init_rx_auto(struct efx_nic *efx, |
2220 | struct efx_farch_filter_spec *spec) |
2221 | { |
2222 | /* If there's only one channel then disable RSS for non VF |
2223 | * traffic, thereby allowing VFs to use RSS when the PF can't. |
2224 | */ |
2225 | spec->priority = EFX_FILTER_PRI_AUTO; |
2226 | spec->flags = (EFX_FILTER_FLAG_RX | |
2227 | (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | |
2228 | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); |
2229 | spec->dmaq_id = 0; |
2230 | } |
2231 | |
2232 | /* Build a filter entry and return its n-tuple key. */ |
2233 | static u32 efx_farch_filter_build(efx_oword_t *filter, |
2234 | struct efx_farch_filter_spec *spec) |
2235 | { |
2236 | u32 data3; |
2237 | |
2238 | switch (efx_farch_filter_spec_table_id(spec)) { |
2239 | case EFX_FARCH_FILTER_TABLE_RX_IP: { |
2240 | bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || |
2241 | spec->type == EFX_FARCH_FILTER_UDP_WILD); |
2242 | EFX_POPULATE_OWORD_7( |
2243 | *filter, |
2244 | FRF_BZ_RSS_EN, |
2245 | !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), |
2246 | FRF_BZ_SCATTER_EN, |
2247 | !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), |
2248 | FRF_BZ_TCP_UDP, is_udp, |
2249 | FRF_BZ_RXQ_ID, spec->dmaq_id, |
2250 | EFX_DWORD_2, spec->data[2], |
2251 | EFX_DWORD_1, spec->data[1], |
2252 | EFX_DWORD_0, spec->data[0]); |
2253 | data3 = is_udp; |
2254 | break; |
2255 | } |
2256 | |
2257 | case EFX_FARCH_FILTER_TABLE_RX_MAC: { |
2258 | bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; |
2259 | EFX_POPULATE_OWORD_7( |
2260 | *filter, |
2261 | FRF_CZ_RMFT_RSS_EN, |
2262 | !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), |
2263 | FRF_CZ_RMFT_SCATTER_EN, |
2264 | !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), |
2265 | FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, |
2266 | FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, |
2267 | FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], |
2268 | FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], |
2269 | FRF_CZ_RMFT_VLAN_ID, spec->data[0]); |
2270 | data3 = is_wild; |
2271 | break; |
2272 | } |
2273 | |
2274 | case EFX_FARCH_FILTER_TABLE_TX_MAC: { |
2275 | bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; |
2276 | EFX_POPULATE_OWORD_5(*filter, |
2277 | FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, |
2278 | FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, |
2279 | FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], |
2280 | FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], |
2281 | FRF_CZ_TMFT_VLAN_ID, spec->data[0]); |
2282 | data3 = is_wild | spec->dmaq_id << 1; |
2283 | break; |
2284 | } |
2285 | |
2286 | default: |
2287 | BUG(); |
2288 | } |
2289 | |
2290 | return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; |
2291 | } |
2292 | |
2293 | static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, |
2294 | const struct efx_farch_filter_spec *right) |
2295 | { |
2296 | if (left->type != right->type || |
	    memcmp(left->data, right->data, sizeof(left->data)))
2298 | return false; |
2299 | |
2300 | if (left->flags & EFX_FILTER_FLAG_TX && |
2301 | left->dmaq_id != right->dmaq_id) |
2302 | return false; |
2303 | |
2304 | return true; |
2305 | } |
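
/* Note that dmaq_id is compared only for TX filters: the TX queue is
 * part of the hardware match (it also feeds the key in
 * efx_farch_filter_build()), so TX specs differing only in queue are
 * distinct filters.
 */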
2306 | |
2307 | /* |
2308 | * Construct/deconstruct external filter IDs. At least the RX filter |
2309 | * IDs must be ordered by matching priority, for RX NFC semantics. |
2310 | * |
2311 | * Deconstruction needs to be robust against invalid IDs so that |
2312 | * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can |
2313 | * accept user-provided IDs. |
2314 | */ |
2315 | |
2316 | #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 |
2317 | |
2318 | static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { |
2319 | [EFX_FARCH_FILTER_TCP_FULL] = 0, |
2320 | [EFX_FARCH_FILTER_UDP_FULL] = 0, |
2321 | [EFX_FARCH_FILTER_TCP_WILD] = 1, |
2322 | [EFX_FARCH_FILTER_UDP_WILD] = 1, |
2323 | [EFX_FARCH_FILTER_MAC_FULL] = 2, |
2324 | [EFX_FARCH_FILTER_MAC_WILD] = 3, |
2325 | [EFX_FARCH_FILTER_UC_DEF] = 4, |
2326 | [EFX_FARCH_FILTER_MC_DEF] = 4, |
2327 | }; |
2328 | |
2329 | static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { |
2330 | EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ |
2331 | EFX_FARCH_FILTER_TABLE_RX_IP, |
2332 | EFX_FARCH_FILTER_TABLE_RX_MAC, |
2333 | EFX_FARCH_FILTER_TABLE_RX_MAC, |
2334 | EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ |
2335 | EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ |
2336 | EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ |
2337 | }; |
2338 | |
2339 | #define EFX_FARCH_FILTER_INDEX_WIDTH 13 |
2340 | #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) |
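
/* Illustrative decode: ID 0x4001 has range 0x4001 >> 13 = 2, i.e.
 * EFX_FARCH_FILTER_TABLE_RX_MAC per the range table above, and index
 * 0x4001 & 0x1fff = 1.
 */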
2341 | |
2342 | static inline u32 |
2343 | efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, |
2344 | unsigned int index) |
2345 | { |
2346 | unsigned int range; |
2347 | |
2348 | range = efx_farch_filter_type_match_pri[spec->type]; |
2349 | if (!(spec->flags & EFX_FILTER_FLAG_RX)) |
2350 | range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; |
2351 | |
2352 | return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; |
2353 | } |
2354 | |
2355 | static inline enum efx_farch_filter_table_id |
2356 | efx_farch_filter_id_table_id(u32 id) |
2357 | { |
2358 | unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; |
2359 | |
2360 | if (range < ARRAY_SIZE(efx_farch_filter_range_table)) |
2361 | return efx_farch_filter_range_table[range]; |
2362 | else |
2363 | return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ |
2364 | } |
2365 | |
2366 | static inline unsigned int efx_farch_filter_id_index(u32 id) |
2367 | { |
2368 | return id & EFX_FARCH_FILTER_INDEX_MASK; |
2369 | } |
2370 | |
2371 | u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) |
2372 | { |
2373 | struct efx_farch_filter_state *state = efx->filter_state; |
2374 | unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; |
2375 | enum efx_farch_filter_table_id table_id; |
2376 | |
2377 | do { |
2378 | table_id = efx_farch_filter_range_table[range]; |
2379 | if (state->table[table_id].size != 0) |
2380 | return range << EFX_FARCH_FILTER_INDEX_WIDTH | |
2381 | state->table[table_id].size; |
2382 | } while (range--); |
2383 | |
2384 | return 0; |
2385 | } |
2386 | |
2387 | s32 efx_farch_filter_insert(struct efx_nic *efx, |
2388 | struct efx_filter_spec *gen_spec, |
2389 | bool replace_equal) |
2390 | { |
2391 | struct efx_farch_filter_state *state = efx->filter_state; |
2392 | struct efx_farch_filter_table *table; |
2393 | struct efx_farch_filter_spec spec; |
2394 | efx_oword_t filter; |
2395 | int rep_index, ins_index; |
2396 | unsigned int depth = 0; |
2397 | int rc; |
2398 | |
	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2400 | if (rc) |
2401 | return rc; |
2402 | |
	down_write(&state->lock);
2404 | |
	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2406 | if (table->size == 0) { |
2407 | rc = -EINVAL; |
2408 | goto out_unlock; |
2409 | } |
2410 | |
2411 | netif_vdbg(efx, hw, efx->net_dev, |
2412 | "%s: type %d search_limit=%d" , __func__, spec.type, |
2413 | table->search_limit[spec.type]); |
2414 | |
2415 | if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { |
2416 | /* One filter spec per type */ |
2417 | BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); |
2418 | BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != |
2419 | EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); |
2420 | rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; |
2421 | ins_index = rep_index; |
2422 | } else { |
2423 | /* Search concurrently for |
2424 | * (1) a filter to be replaced (rep_index): any filter |
2425 | * with the same match values, up to the current |
2426 | * search depth for this type, and |
2427 | * (2) the insertion point (ins_index): (1) or any |
2428 | * free slot before it or up to the maximum search |
2429 | * depth for this priority |
2430 | * We fail if we cannot find (2). |
2431 | * |
2432 | * We can stop once either |
2433 | * (a) we find (1), in which case we have definitely |
2434 | * found (2) as well; or |
2435 | * (b) we have searched exhaustively for (1), and have |
2436 | * either found (2) or searched exhaustively for it |
2437 | */ |
		u32 key = efx_farch_filter_build(&filter, &spec);
2439 | unsigned int hash = efx_farch_filter_hash(key); |
2440 | unsigned int incr = efx_farch_filter_increment(key); |
2441 | unsigned int max_rep_depth = table->search_limit[spec.type]; |
2442 | unsigned int max_ins_depth = |
2443 | spec.priority <= EFX_FILTER_PRI_HINT ? |
2444 | EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : |
2445 | EFX_FARCH_FILTER_CTL_SRCH_MAX; |
2446 | unsigned int i = hash & (table->size - 1); |
2447 | |
2448 | ins_index = -1; |
2449 | depth = 1; |
2450 | |
2451 | for (;;) { |
2452 | if (!test_bit(i, table->used_bitmap)) { |
2453 | if (ins_index < 0) |
2454 | ins_index = i; |
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
2457 | /* Case (a) */ |
2458 | if (ins_index < 0) |
2459 | ins_index = i; |
2460 | rep_index = i; |
2461 | break; |
2462 | } |
2463 | |
2464 | if (depth >= max_rep_depth && |
2465 | (ins_index >= 0 || depth >= max_ins_depth)) { |
2466 | /* Case (b) */ |
2467 | if (ins_index < 0) { |
2468 | rc = -EBUSY; |
2469 | goto out_unlock; |
2470 | } |
2471 | rep_index = -1; |
2472 | break; |
2473 | } |
2474 | |
2475 | i = (i + incr) & (table->size - 1); |
2476 | ++depth; |
2477 | } |
2478 | } |
2479 | |
2480 | /* If we found a filter to be replaced, check whether we |
2481 | * should do so |
2482 | */ |
2483 | if (rep_index >= 0) { |
2484 | struct efx_farch_filter_spec *saved_spec = |
2485 | &table->spec[rep_index]; |
2486 | |
2487 | if (spec.priority == saved_spec->priority && !replace_equal) { |
2488 | rc = -EEXIST; |
2489 | goto out_unlock; |
2490 | } |
2491 | if (spec.priority < saved_spec->priority) { |
2492 | rc = -EPERM; |
2493 | goto out_unlock; |
2494 | } |
2495 | if (saved_spec->priority == EFX_FILTER_PRI_AUTO || |
2496 | saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) |
2497 | spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; |
2498 | } |
2499 | |
2500 | /* Insert the filter */ |
2501 | if (ins_index != rep_index) { |
2502 | __set_bit(ins_index, table->used_bitmap); |
2503 | ++table->used; |
2504 | } |
2505 | table->spec[ins_index] = spec; |
2506 | |
2507 | if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { |
2508 | efx_farch_filter_push_rx_config(efx); |
2509 | } else { |
2510 | if (table->search_limit[spec.type] < depth) { |
2511 | table->search_limit[spec.type] = depth; |
2512 | if (spec.flags & EFX_FILTER_FLAG_TX) |
2513 | efx_farch_filter_push_tx_limits(efx); |
2514 | else |
2515 | efx_farch_filter_push_rx_config(efx); |
2516 | } |
2517 | |
		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);
2520 | |
2521 | /* If we were able to replace a filter by inserting |
2522 | * at a lower depth, clear the replaced filter |
2523 | */ |
2524 | if (ins_index != rep_index && rep_index >= 0) |
2525 | efx_farch_filter_table_clear_entry(efx, table, |
							   rep_index);
2527 | } |
2528 | |
2529 | netif_vdbg(efx, hw, efx->net_dev, |
2530 | "%s: filter type %d index %d rxq %u set" , |
2531 | __func__, spec.type, ins_index, spec.dmaq_id); |
	rc = efx_farch_filter_make_id(&spec, ins_index);
2533 | |
2534 | out_unlock: |
	up_write(&state->lock);
2536 | return rc; |
2537 | } |
2538 | |
2539 | static void |
2540 | efx_farch_filter_table_clear_entry(struct efx_nic *efx, |
2541 | struct efx_farch_filter_table *table, |
2542 | unsigned int filter_idx) |
2543 | { |
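	/* Static and hence zero-initialised: writing it zeroes the
	 * hardware table entry.
	 */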
2544 | static efx_oword_t filter; |
2545 | |
2546 | EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); |
2547 | BUG_ON(table->offset == 0); /* can't clear MAC default filters */ |
2548 | |
2549 | __clear_bit(filter_idx, table->used_bitmap); |
2550 | --table->used; |
2551 | memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); |
2552 | |
	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2554 | |
2555 | /* If this filter required a greater search depth than |
2556 | * any other, the search limit for its type can now be |
2557 | * decreased. However, it is hard to determine that |
2558 | * unless the table has become completely empty - in |
2559 | * which case, all its search limits can be set to 0. |
2560 | */ |
2561 | if (unlikely(table->used == 0)) { |
2562 | memset(table->search_limit, 0, sizeof(table->search_limit)); |
2563 | if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) |
2564 | efx_farch_filter_push_tx_limits(efx); |
2565 | else |
2566 | efx_farch_filter_push_rx_config(efx); |
2567 | } |
2568 | } |
2569 | |
2570 | static int efx_farch_filter_remove(struct efx_nic *efx, |
2571 | struct efx_farch_filter_table *table, |
2572 | unsigned int filter_idx, |
2573 | enum efx_filter_priority priority) |
2574 | { |
2575 | struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; |
2576 | |
2577 | if (!test_bit(filter_idx, table->used_bitmap) || |
2578 | spec->priority != priority) |
2579 | return -ENOENT; |
2580 | |
2581 | if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { |
2582 | efx_farch_filter_init_rx_auto(efx, spec); |
2583 | efx_farch_filter_push_rx_config(efx); |
2584 | } else { |
2585 | efx_farch_filter_table_clear_entry(efx, table, filter_idx); |
2586 | } |
2587 | |
2588 | return 0; |
2589 | } |
2590 | |
2591 | int efx_farch_filter_remove_safe(struct efx_nic *efx, |
2592 | enum efx_filter_priority priority, |
2593 | u32 filter_id) |
2594 | { |
2595 | struct efx_farch_filter_state *state = efx->filter_state; |
2596 | enum efx_farch_filter_table_id table_id; |
2597 | struct efx_farch_filter_table *table; |
2598 | unsigned int filter_idx; |
2599 | int rc; |
2600 | |
	table_id = efx_farch_filter_id_table_id(filter_id);
2602 | if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) |
2603 | return -ENOENT; |
2604 | table = &state->table[table_id]; |
2605 | |
	filter_idx = efx_farch_filter_id_index(filter_id);
2607 | if (filter_idx >= table->size) |
2608 | return -ENOENT; |
	down_write(&state->lock);

	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	up_write(&state->lock);
2613 | |
2614 | return rc; |
2615 | } |
2616 | |
2617 | int efx_farch_filter_get_safe(struct efx_nic *efx, |
2618 | enum efx_filter_priority priority, |
2619 | u32 filter_id, struct efx_filter_spec *spec_buf) |
2620 | { |
2621 | struct efx_farch_filter_state *state = efx->filter_state; |
2622 | enum efx_farch_filter_table_id table_id; |
2623 | struct efx_farch_filter_table *table; |
2624 | struct efx_farch_filter_spec *spec; |
2625 | unsigned int filter_idx; |
2626 | int rc = -ENOENT; |
2627 | |
	down_read(&state->lock);
2629 | |
	table_id = efx_farch_filter_id_table_id(filter_id);
2631 | if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) |
2632 | goto out_unlock; |
2633 | table = &state->table[table_id]; |
2634 | |
	filter_idx = efx_farch_filter_id_index(filter_id);
2636 | if (filter_idx >= table->size) |
2637 | goto out_unlock; |
2638 | spec = &table->spec[filter_idx]; |
2639 | |
2640 | if (test_bit(filter_idx, table->used_bitmap) && |
2641 | spec->priority == priority) { |
		efx_farch_filter_to_gen_spec(spec_buf, spec);
2643 | rc = 0; |
2644 | } |
2645 | |
2646 | out_unlock: |
	up_read(&state->lock);
2648 | return rc; |
2649 | } |
2650 | |
2651 | static void |
2652 | efx_farch_filter_table_clear(struct efx_nic *efx, |
2653 | enum efx_farch_filter_table_id table_id, |
2654 | enum efx_filter_priority priority) |
2655 | { |
2656 | struct efx_farch_filter_state *state = efx->filter_state; |
2657 | struct efx_farch_filter_table *table = &state->table[table_id]; |
2658 | unsigned int filter_idx; |
2659 | |
	down_write(&state->lock);
2661 | for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { |
2662 | if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) |
2663 | efx_farch_filter_remove(efx, table, |
2664 | filter_idx, priority); |
2665 | } |
	up_write(&state->lock);
2667 | } |
2668 | |
2669 | int efx_farch_filter_clear_rx(struct efx_nic *efx, |
2670 | enum efx_filter_priority priority) |
2671 | { |
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
2678 | return 0; |
2679 | } |
2680 | |
2681 | u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, |
2682 | enum efx_filter_priority priority) |
2683 | { |
2684 | struct efx_farch_filter_state *state = efx->filter_state; |
2685 | enum efx_farch_filter_table_id table_id; |
2686 | struct efx_farch_filter_table *table; |
2687 | unsigned int filter_idx; |
2688 | u32 count = 0; |
2689 | |
	down_read(&state->lock);
2691 | |
2692 | for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; |
2693 | table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; |
2694 | table_id++) { |
2695 | table = &state->table[table_id]; |
2696 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2697 | if (test_bit(filter_idx, table->used_bitmap) && |
2698 | table->spec[filter_idx].priority == priority) |
2699 | ++count; |
2700 | } |
2701 | } |
2702 | |
	up_read(&state->lock);
2704 | |
2705 | return count; |
2706 | } |
2707 | |
2708 | s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, |
2709 | enum efx_filter_priority priority, |
2710 | u32 *buf, u32 size) |
2711 | { |
2712 | struct efx_farch_filter_state *state = efx->filter_state; |
2713 | enum efx_farch_filter_table_id table_id; |
2714 | struct efx_farch_filter_table *table; |
2715 | unsigned int filter_idx; |
2716 | s32 count = 0; |
2717 | |
	down_read(&state->lock);
2719 | |
2720 | for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; |
2721 | table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; |
2722 | table_id++) { |
2723 | table = &state->table[table_id]; |
2724 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2725 | if (test_bit(filter_idx, table->used_bitmap) && |
2726 | table->spec[filter_idx].priority == priority) { |
2727 | if (count == size) { |
2728 | count = -EMSGSIZE; |
2729 | goto out; |
2730 | } |
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
2733 | } |
2734 | } |
2735 | } |
2736 | out: |
	up_read(&state->lock);
2738 | |
2739 | return count; |
2740 | } |
2741 | |
/* Restore filter state after a reset */
2743 | void efx_farch_filter_table_restore(struct efx_nic *efx) |
2744 | { |
2745 | struct efx_farch_filter_state *state = efx->filter_state; |
2746 | enum efx_farch_filter_table_id table_id; |
2747 | struct efx_farch_filter_table *table; |
2748 | efx_oword_t filter; |
2749 | unsigned int filter_idx; |
2750 | |
	down_write(&state->lock);
2752 | |
2753 | for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { |
2754 | table = &state->table[table_id]; |
2755 | |
2756 | /* Check whether this is a regular register table */ |
2757 | if (table->step == 0) |
2758 | continue; |
2759 | |
2760 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2761 | if (!test_bit(filter_idx, table->used_bitmap)) |
2762 | continue; |
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
2766 | } |
2767 | } |
2768 | |
2769 | efx_farch_filter_push_rx_config(efx); |
2770 | efx_farch_filter_push_tx_limits(efx); |
2771 | |
	up_write(&state->lock);
2773 | } |
2774 | |
2775 | void efx_farch_filter_table_remove(struct efx_nic *efx) |
2776 | { |
2777 | struct efx_farch_filter_state *state = efx->filter_state; |
2778 | enum efx_farch_filter_table_id table_id; |
2779 | |
2780 | for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { |
		bitmap_free(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
2785 | } |
2786 | |
2787 | int efx_farch_filter_table_probe(struct efx_nic *efx) |
2788 | { |
2789 | struct efx_farch_filter_state *state; |
2790 | struct efx_farch_filter_table *table; |
2791 | unsigned table_id; |
2792 | |
	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2794 | if (!state) |
2795 | return -ENOMEM; |
2796 | efx->filter_state = state; |
2797 | init_rwsem(&state->lock); |
2798 | |
2799 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; |
2800 | table->id = EFX_FARCH_FILTER_TABLE_RX_IP; |
2801 | table->offset = FR_BZ_RX_FILTER_TBL0; |
2802 | table->size = FR_BZ_RX_FILTER_TBL0_ROWS; |
2803 | table->step = FR_BZ_RX_FILTER_TBL0_STEP; |
2804 | |
2805 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; |
2806 | table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; |
2807 | table->offset = FR_CZ_RX_MAC_FILTER_TBL0; |
2808 | table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; |
2809 | table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; |
2810 | |
2811 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; |
2812 | table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; |
2813 | table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; |
2814 | |
2815 | table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; |
2816 | table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; |
2817 | table->offset = FR_CZ_TX_MAC_FILTER_TBL0; |
2818 | table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; |
2819 | table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; |
2820 | |
2821 | for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { |
2822 | table = &state->table[table_id]; |
2823 | if (table->size == 0) |
2824 | continue; |
		table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
2826 | if (!table->used_bitmap) |
2827 | goto fail; |
2828 | table->spec = vzalloc(array_size(sizeof(*table->spec), |
2829 | table->size)); |
2830 | if (!table->spec) |
2831 | goto fail; |
2832 | } |
2833 | |
2834 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; |
2835 | if (table->size) { |
2836 | /* RX default filters must always exist */ |
2837 | struct efx_farch_filter_spec *spec; |
2838 | unsigned i; |
2839 | |
2840 | for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { |
2841 | spec = &table->spec[i]; |
2842 | spec->type = EFX_FARCH_FILTER_UC_DEF + i; |
2843 | efx_farch_filter_init_rx_auto(efx, spec); |
2844 | __set_bit(i, table->used_bitmap); |
2845 | } |
2846 | } |
2847 | |
2848 | efx_farch_filter_push_rx_config(efx); |
2849 | |
2850 | return 0; |
2851 | |
2852 | fail: |
2853 | efx_farch_filter_table_remove(efx); |
2854 | return -ENOMEM; |
2855 | } |
2856 | |
2857 | /* Update scatter enable flags for filters pointing to our own RX queues */ |
2858 | void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) |
2859 | { |
2860 | struct efx_farch_filter_state *state = efx->filter_state; |
2861 | enum efx_farch_filter_table_id table_id; |
2862 | struct efx_farch_filter_table *table; |
2863 | efx_oword_t filter; |
2864 | unsigned int filter_idx; |
2865 | |
	down_write(&state->lock);
2867 | |
2868 | for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; |
2869 | table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; |
2870 | table_id++) { |
2871 | table = &state->table[table_id]; |
2872 | |
2873 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2874 | if (!test_bit(filter_idx, table->used_bitmap) || |
2875 | table->spec[filter_idx].dmaq_id >= |
2876 | efx->n_rx_channels) |
2877 | continue; |
2878 | |
2879 | if (efx->rx_scatter) |
2880 | table->spec[filter_idx].flags |= |
2881 | EFX_FILTER_FLAG_RX_SCATTER; |
2882 | else |
2883 | table->spec[filter_idx].flags &= |
2884 | ~EFX_FILTER_FLAG_RX_SCATTER; |
2885 | |
2886 | if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) |
2887 | /* Pushed by efx_farch_filter_push_rx_config() */ |
2888 | continue; |
2889 | |
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
2893 | } |
2894 | } |
2895 | |
2896 | efx_farch_filter_push_rx_config(efx); |
2897 | |
	up_write(&state->lock);
2899 | } |
2900 | |
2901 | #ifdef CONFIG_RFS_ACCEL |
2902 | |
2903 | bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, |
2904 | unsigned int index) |
2905 | { |
2906 | struct efx_farch_filter_state *state = efx->filter_state; |
2907 | struct efx_farch_filter_table *table; |
2908 | bool ret = false, force = false; |
2909 | u16 arfs_id; |
2910 | |
	down_write(&state->lock);
	spin_lock_bh(&efx->rps_hash_lock);
2913 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; |
2914 | if (test_bit(index, table->used_bitmap) && |
2915 | table->spec[index].priority == EFX_FILTER_PRI_HINT) { |
2916 | struct efx_arfs_rule *rule = NULL; |
2917 | struct efx_filter_spec spec; |
2918 | |
		efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
2920 | if (!efx->rps_hash_table) { |
2921 | /* In the absence of the table, we always returned 0 to |
2922 | * ARFS, so use the same to query it. |
2923 | */ |
2924 | arfs_id = 0; |
2925 | } else { |
			rule = efx_siena_rps_hash_find(efx, &spec);
2927 | if (!rule) { |
2928 | /* ARFS table doesn't know of this filter, remove it */ |
2929 | force = true; |
2930 | } else { |
2931 | arfs_id = rule->arfs_id; |
				if (!efx_siena_rps_check_rule(rule, index,
							      &force))
2934 | goto out_unlock; |
2935 | } |
2936 | } |
		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
						 flow_id, arfs_id)) {
2939 | if (rule) |
2940 | rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; |
			efx_siena_rps_hash_del(efx, &spec);
			efx_farch_filter_table_clear_entry(efx, table, index);
2943 | ret = true; |
2944 | } |
2945 | } |
2946 | out_unlock: |
	spin_unlock_bh(&efx->rps_hash_lock);
	up_write(&state->lock);
2949 | return ret; |
2950 | } |
2951 | |
2952 | #endif /* CONFIG_RFS_ACCEL */ |
2953 | |
2954 | void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) |
2955 | { |
2956 | struct net_device *net_dev = efx->net_dev; |
2957 | struct netdev_hw_addr *ha; |
2958 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; |
2959 | u32 crc; |
2960 | int bit; |
2961 | |
2962 | if (!efx_dev_registered(efx)) |
2963 | return; |
2964 | |
	netif_addr_lock_bh(net_dev);
2966 | |
2967 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); |
2968 | |
2969 | /* Build multicast hash table */ |
2970 | if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { |
2971 | memset(mc_hash, 0xff, sizeof(*mc_hash)); |
2972 | } else { |
2973 | memset(mc_hash, 0x00, sizeof(*mc_hash)); |
2974 | netdev_for_each_mc_addr(ha, net_dev) { |
2975 | crc = ether_crc_le(ETH_ALEN, ha->addr); |
2976 | bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); |
			__set_bit_le(bit, mc_hash);
2978 | } |
2979 | |
2980 | /* Broadcast packets go through the multicast hash filter. |
2981 | * ether_crc_le() of the broadcast address is 0xbe2612ff |
2982 | * so we always add bit 0xff to the mask. |
2983 | */ |
		__set_bit_le(0xff, mc_hash);
2985 | } |
2986 | |
	netif_addr_unlock_bh(net_dev);
2988 | } |
2989 | |