1 | /* bnx2x_sp.c: Qlogic Everest network driver. |
2 | * |
3 | * Copyright 2011-2013 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation |
5 | * All rights reserved |
6 | * |
7 | * Unless you and Qlogic execute a separate written software license |
8 | * agreement governing use of this software, this software is licensed to you |
9 | * under the terms of the GNU General Public License version 2, available |
10 | * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). |
11 | * |
12 | * Notwithstanding the above, under no circumstances may you combine this |
13 | * software in any way with any other Qlogic software provided under a |
14 | * license other than the GPL, without Qlogic's express prior written |
15 | * consent. |
16 | * |
17 | * Maintained by: Ariel Elior <ariel.elior@qlogic.com> |
18 | * Written by: Vladislav Zolotarov |
19 | * |
20 | */ |
21 | |
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
23 | |
24 | #include <linux/module.h> |
25 | #include <linux/crc32.h> |
26 | #include <linux/netdevice.h> |
27 | #include <linux/etherdevice.h> |
28 | #include <linux/crc32c.h> |
29 | #include "bnx2x.h" |
30 | #include "bnx2x_cmn.h" |
31 | #include "bnx2x_sp.h" |
32 | |
33 | #define BNX2X_MAX_EMUL_MULTI 16 |
34 | |
35 | /**** Exe Queue interfaces ****/ |
36 | |
37 | /** |
38 | * bnx2x_exe_queue_init - init the Exe Queue object |
39 | * |
40 | * @bp: driver handle |
41 | * @o: pointer to the object |
 * @exe_len:	maximum length of a single execution chunk
43 | * @owner: pointer to the owner |
44 | * @validate: validate function pointer |
45 | * @remove: remove function pointer |
46 | * @optimize: optimize function pointer |
47 | * @exec: execute function pointer |
48 | * @get: get function pointer |
49 | */ |
50 | static inline void bnx2x_exe_queue_init(struct bnx2x *bp, |
51 | struct bnx2x_exe_queue_obj *o, |
52 | int exe_len, |
53 | union bnx2x_qable_obj *owner, |
54 | exe_q_validate validate, |
55 | exe_q_remove remove, |
56 | exe_q_optimize optimize, |
57 | exe_q_execute exec, |
58 | exe_q_get get) |
59 | { |
60 | memset(o, 0, sizeof(*o)); |
61 | |
	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);
64 | |
65 | spin_lock_init(&o->lock); |
66 | |
67 | o->exe_chunk_len = exe_len; |
68 | o->owner = owner; |
69 | |
70 | /* Owner specific callbacks */ |
71 | o->validate = validate; |
72 | o->remove = remove; |
73 | o->optimize = optimize; |
74 | o->execute = exec; |
75 | o->get = get; |
76 | |
	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
78 | exe_len); |
79 | } |
80 | |
81 | static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, |
82 | struct bnx2x_exeq_elem *elem) |
83 | { |
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
86 | } |
87 | |
88 | static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) |
89 | { |
90 | struct bnx2x_exeq_elem *elem; |
91 | int cnt = 0; |
92 | |
	spin_lock_bh(&o->lock);
94 | |
95 | list_for_each_entry(elem, &o->exe_queue, link) |
96 | cnt++; |
97 | |
	spin_unlock_bh(&o->lock);
99 | |
100 | return cnt; |
101 | } |
102 | |
103 | /** |
104 | * bnx2x_exe_queue_add - add a new element to the execution queue |
105 | * |
106 | * @bp: driver handle |
107 | * @o: queue |
108 | * @elem: new command to add |
109 | * @restore: true - do not optimize the command |
110 | * |
111 | * If the element is optimized or is illegal, frees it. |
112 | */ |
113 | static inline int bnx2x_exe_queue_add(struct bnx2x *bp, |
114 | struct bnx2x_exe_queue_obj *o, |
115 | struct bnx2x_exeq_elem *elem, |
116 | bool restore) |
117 | { |
118 | int rc; |
119 | |
	spin_lock_bh(&o->lock);
121 | |
122 | if (!restore) { |
123 | /* Try to cancel this element queue */ |
124 | rc = o->optimize(bp, o->owner, elem); |
125 | if (rc) |
126 | goto free_and_exit; |
127 | |
128 | /* Check if this request is ok */ |
129 | rc = o->validate(bp, o->owner, elem); |
130 | if (rc) { |
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
132 | goto free_and_exit; |
133 | } |
134 | } |
135 | |
136 | /* If so, add it to the execution queue */ |
	list_add_tail(&elem->link, &o->exe_queue);
138 | |
	spin_unlock_bh(&o->lock);
140 | |
141 | return 0; |
142 | |
143 | free_and_exit: |
144 | bnx2x_exe_queue_free_elem(bp, elem); |
145 | |
	spin_unlock_bh(&o->lock);
147 | |
148 | return rc; |
149 | } |
150 | |
151 | static inline void __bnx2x_exe_queue_reset_pending( |
152 | struct bnx2x *bp, |
153 | struct bnx2x_exe_queue_obj *o) |
154 | { |
155 | struct bnx2x_exeq_elem *elem; |
156 | |
	while (!list_empty(&o->pending_comp)) {
158 | elem = list_first_entry(&o->pending_comp, |
159 | struct bnx2x_exeq_elem, link); |
160 | |
		list_del(&elem->link);
162 | bnx2x_exe_queue_free_elem(bp, elem); |
163 | } |
164 | } |
165 | |
166 | /** |
167 | * bnx2x_exe_queue_step - execute one execution chunk atomically |
168 | * |
169 | * @bp: driver handle |
170 | * @o: queue |
171 | * @ramrod_flags: flags |
172 | * |
173 | * (Should be called while holding the exe_queue->lock). |
174 | */ |
175 | static inline int bnx2x_exe_queue_step(struct bnx2x *bp, |
176 | struct bnx2x_exe_queue_obj *o, |
177 | unsigned long *ramrod_flags) |
178 | { |
179 | struct bnx2x_exeq_elem *elem, spacer; |
180 | int cur_len = 0, rc; |
181 | |
182 | memset(&spacer, 0, sizeof(spacer)); |
183 | |
184 | /* Next step should not be performed until the current is finished, |
185 | * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to |
186 | * properly clear object internals without sending any command to the FW |
187 | * which also implies there won't be any completion to clear the |
188 | * 'pending' list. |
189 | */ |
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
193 | __bnx2x_exe_queue_reset_pending(bp, o); |
194 | } else { |
195 | return 1; |
196 | } |
197 | } |
198 | |
199 | /* Run through the pending commands list and create a next |
200 | * execution chunk. |
201 | */ |
	while (!list_empty(&o->exe_queue)) {
203 | elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, |
204 | link); |
205 | WARN_ON(!elem->cmd_len); |
206 | |
207 | if (cur_len + elem->cmd_len <= o->exe_chunk_len) { |
208 | cur_len += elem->cmd_len; |
209 | /* Prevent from both lists being empty when moving an |
210 | * element. This will allow the call of |
211 | * bnx2x_exe_queue_empty() without locking. |
212 | */ |
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
217 | } else |
218 | break; |
219 | } |
220 | |
221 | /* Sanity check */ |
222 | if (!cur_len) |
223 | return 0; |
224 | |
225 | rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); |
226 | if (rc < 0) |
227 | /* In case of an error return the commands back to the queue |
228 | * and reset the pending_comp. |
229 | */ |
		list_splice_init(&o->pending_comp, &o->exe_queue);
231 | else if (!rc) |
232 | /* If zero is returned, means there are no outstanding pending |
233 | * completions and we may dismiss the pending list. |
234 | */ |
235 | __bnx2x_exe_queue_reset_pending(bp, o); |
236 | |
237 | return rc; |
238 | } |
239 | |
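/* Lockless check used by the wait loops: the barrier pairs with the 'spacer'
 * insertion in bnx2x_exe_queue_step(), so the queue never appears empty while
 * an element is being moved from exe_queue to pending_comp.
 */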
240 | static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) |
241 | { |
	bool empty = list_empty(&o->exe_queue);
243 | |
244 | /* Don't reorder!!! */ |
245 | mb(); |
246 | |
	return empty && list_empty(&o->pending_comp);
248 | } |
249 | |
250 | static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( |
251 | struct bnx2x *bp) |
252 | { |
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
255 | } |
256 | |
257 | /************************ raw_obj functions ***********************************/ |
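/* The raw_obj "pending" helpers below set, clear and test the object's state
 * bit in the owner's pstate bitmap; this bit tracks whether a ramrod for this
 * object is still outstanding (i.e. its completion has not arrived yet).
 */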
258 | static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) |
259 | { |
260 | return !!test_bit(o->state, o->pstate); |
261 | } |
262 | |
263 | static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) |
264 | { |
265 | smp_mb__before_atomic(); |
	clear_bit(o->state, o->pstate);
267 | smp_mb__after_atomic(); |
268 | } |
269 | |
270 | static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) |
271 | { |
272 | smp_mb__before_atomic(); |
	set_bit(o->state, o->pstate);
274 | smp_mb__after_atomic(); |
275 | } |
276 | |
277 | /** |
278 | * bnx2x_state_wait - wait until the given bit(state) is cleared |
279 | * |
280 | * @bp: device handle |
281 | * @state: state which is to be cleared |
282 | * @pstate: state buffer |
283 | * |
284 | */ |
285 | static inline int bnx2x_state_wait(struct bnx2x *bp, int state, |
286 | unsigned long *pstate) |
287 | { |
288 | /* can take a while if any port is running */ |
289 | int cnt = 5000; |
290 | |
291 | if (CHIP_REV_IS_EMUL(bp)) |
292 | cnt *= 20; |
293 | |
	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
295 | |
296 | might_sleep(); |
297 | while (cnt--) { |
298 | if (!test_bit(state, pstate)) { |
299 | #ifdef BNX2X_STOP_ON_ERROR |
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
301 | #endif |
302 | return 0; |
303 | } |
304 | |
		usleep_range(1000, 2000);
306 | |
307 | if (bp->panic) |
308 | return -EIO; |
309 | } |
310 | |
311 | /* timeout! */ |
	BNX2X_ERR("timeout waiting for state %d\n", state);
313 | #ifdef BNX2X_STOP_ON_ERROR |
314 | bnx2x_panic(); |
315 | #endif |
316 | |
317 | return -EBUSY; |
318 | } |
319 | |
320 | static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) |
321 | { |
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
323 | } |
324 | |
325 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ |
326 | /* credit handling callbacks */ |
327 | static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) |
328 | { |
329 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; |
330 | |
331 | WARN_ON(!mp); |
332 | |
333 | return mp->get_entry(mp, offset); |
334 | } |
335 | |
336 | static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) |
337 | { |
338 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; |
339 | |
340 | WARN_ON(!mp); |
341 | |
342 | return mp->get(mp, 1); |
343 | } |
344 | |
345 | static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) |
346 | { |
347 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; |
348 | |
349 | WARN_ON(!vp); |
350 | |
351 | return vp->get_entry(vp, offset); |
352 | } |
353 | |
354 | static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) |
355 | { |
356 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; |
357 | |
358 | WARN_ON(!vp); |
359 | |
360 | return vp->get(vp, 1); |
361 | } |
362 | |
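/* A VLAN-MAC pair consumes one credit from the MAC pool and one from the VLAN
 * pool; if the second reservation fails, the first one is rolled back.
 */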
363 | static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) |
364 | { |
365 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; |
366 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; |
367 | |
368 | if (!mp->get(mp, 1)) |
369 | return false; |
370 | |
371 | if (!vp->get(vp, 1)) { |
372 | mp->put(mp, 1); |
373 | return false; |
374 | } |
375 | |
376 | return true; |
377 | } |
378 | |
379 | static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) |
380 | { |
381 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; |
382 | |
383 | return mp->put_entry(mp, offset); |
384 | } |
385 | |
386 | static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) |
387 | { |
388 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; |
389 | |
390 | return mp->put(mp, 1); |
391 | } |
392 | |
393 | static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset) |
394 | { |
395 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; |
396 | |
397 | return vp->put_entry(vp, offset); |
398 | } |
399 | |
400 | static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) |
401 | { |
402 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; |
403 | |
404 | return vp->put(vp, 1); |
405 | } |
406 | |
407 | static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) |
408 | { |
409 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; |
410 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; |
411 | |
412 | if (!mp->put(mp, 1)) |
413 | return false; |
414 | |
415 | if (!vp->put(vp, 1)) { |
416 | mp->get(mp, 1); |
417 | return false; |
418 | } |
419 | |
420 | return true; |
421 | } |
422 | |
423 | /** |
424 | * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock |
425 | * |
426 | * @bp: device handle |
427 | * @o: vlan_mac object |
428 | * |
429 | * Context: Non-blocking implementation; should be called under execution |
430 | * queue lock. |
431 | */ |
432 | static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, |
433 | struct bnx2x_vlan_mac_obj *o) |
434 | { |
435 | if (o->head_reader) { |
		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
437 | return -EBUSY; |
438 | } |
439 | |
	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
441 | return 0; |
442 | } |
443 | |
444 | /** |
445 | * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step |
446 | * |
447 | * @bp: device handle |
448 | * @o: vlan_mac object |
449 | * |
450 | * details Should be called under execution queue lock; notice it might release |
451 | * and reclaim it during its run. |
452 | */ |
453 | static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, |
454 | struct bnx2x_vlan_mac_obj *o) |
455 | { |
456 | int rc; |
457 | unsigned long ramrod_flags = o->saved_ramrod_flags; |
458 | |
	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
460 | ramrod_flags); |
461 | o->head_exe_request = false; |
462 | o->saved_ramrod_flags = 0; |
	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
	if ((rc != 0) && (rc != 1)) {
		BNX2X_ERR("execution of pending commands failed with rc %d\n",
466 | rc); |
467 | #ifdef BNX2X_STOP_ON_ERROR |
468 | bnx2x_panic(); |
469 | #endif |
470 | } |
471 | } |
472 | |
473 | /** |
474 | * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run |
475 | * |
476 | * @bp: device handle |
477 | * @o: vlan_mac object |
478 | * @ramrod_flags: ramrod flags of missed execution |
479 | * |
480 | * Context: Should be called under execution queue lock. |
481 | */ |
482 | static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, |
483 | struct bnx2x_vlan_mac_obj *o, |
484 | unsigned long ramrod_flags) |
485 | { |
486 | o->head_exe_request = true; |
487 | o->saved_ramrod_flags = ramrod_flags; |
	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
489 | ramrod_flags); |
490 | } |
491 | |
492 | /** |
493 | * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock |
494 | * |
495 | * @bp: device handle |
496 | * @o: vlan_mac object |
497 | * |
498 | * Context: Should be called under execution queue lock. Notice if a pending |
499 | * execution exists, it would perform it - possibly releasing and |
500 | * reclaiming the execution queue lock. |
501 | */ |
502 | static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, |
503 | struct bnx2x_vlan_mac_obj *o) |
504 | { |
505 | /* It's possible a new pending execution was added since this writer |
506 | * executed. If so, execute again. [Ad infinitum] |
507 | */ |
508 | while (o->head_exe_request) { |
		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
510 | __bnx2x_vlan_mac_h_exec_pending(bp, o); |
511 | } |
512 | } |
513 | |
514 | |
515 | /** |
516 | * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock |
517 | * |
518 | * @bp: device handle |
519 | * @o: vlan_mac object |
520 | * |
521 | * Context: Should be called under the execution queue lock. May sleep. May |
522 | * release and reclaim execution queue lock during its run. |
523 | */ |
524 | static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, |
525 | struct bnx2x_vlan_mac_obj *o) |
526 | { |
527 | /* If we got here, we're holding lock --> no WRITER exists */ |
528 | o->head_reader++; |
	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
530 | o->head_reader); |
531 | |
532 | return 0; |
533 | } |
534 | |
535 | /** |
536 | * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock |
537 | * |
538 | * @bp: device handle |
539 | * @o: vlan_mac object |
540 | * |
541 | * Context: May sleep. Claims and releases execution queue lock during its run. |
542 | */ |
543 | int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, |
544 | struct bnx2x_vlan_mac_obj *o) |
545 | { |
546 | int rc; |
547 | |
	spin_lock_bh(&o->exe_queue.lock);
	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
551 | |
552 | return rc; |
553 | } |
554 | |
555 | /** |
556 | * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock |
557 | * |
558 | * @bp: device handle |
559 | * @o: vlan_mac object |
560 | * |
561 | * Context: Should be called under execution queue lock. Notice if a pending |
562 | * execution exists, it would be performed if this was the last |
563 | * reader. possibly releasing and reclaiming the execution queue lock. |
564 | */ |
565 | static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, |
566 | struct bnx2x_vlan_mac_obj *o) |
567 | { |
568 | if (!o->head_reader) { |
		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
570 | #ifdef BNX2X_STOP_ON_ERROR |
571 | bnx2x_panic(); |
572 | #endif |
573 | } else { |
574 | o->head_reader--; |
		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
576 | o->head_reader); |
577 | } |
578 | |
579 | /* It's possible a new pending execution was added, and that this reader |
580 | * was last - if so we need to execute the command. |
581 | */ |
582 | if (!o->head_reader && o->head_exe_request) { |
		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
584 | |
585 | /* Writer release will do the trick */ |
586 | __bnx2x_vlan_mac_h_write_unlock(bp, o); |
587 | } |
588 | } |
589 | |
590 | /** |
591 | * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock |
592 | * |
593 | * @bp: device handle |
594 | * @o: vlan_mac object |
595 | * |
596 | * Context: Notice if a pending execution exists, it would be performed if this |
597 | * was the last reader. Claims and releases the execution queue lock |
598 | * during its run. |
599 | */ |
600 | void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, |
601 | struct bnx2x_vlan_mac_obj *o) |
602 | { |
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_read_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
606 | } |
607 | |
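/* Copy up to n configured entries from the object's registry into the caller's
 * buffer: each entry is 'size' bytes long and entries are laid out
 * 'stride' + 'size' bytes apart, starting at 'base'. Returns the number of
 * copied MAC bytes (copied entries times ETH_ALEN).
 */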
608 | static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, |
609 | int n, u8 *base, u8 stride, u8 size) |
610 | { |
611 | struct bnx2x_vlan_mac_registry_elem *pos; |
612 | u8 *next = base; |
613 | int counter = 0; |
614 | int read_lock; |
615 | |
	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
617 | read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); |
618 | if (read_lock != 0) |
		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
620 | |
621 | /* traverse list */ |
622 | list_for_each_entry(pos, &o->head, link) { |
623 | if (counter < n) { |
624 | memcpy(next, &pos->u, size); |
625 | counter++; |
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
627 | counter, next); |
628 | next += stride + size; |
629 | } |
630 | } |
631 | |
632 | if (read_lock == 0) { |
		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
634 | bnx2x_vlan_mac_h_read_unlock(bp, o); |
635 | } |
636 | |
637 | return counter * ETH_ALEN; |
638 | } |
639 | |
640 | /* check_add() callbacks */ |
641 | static int bnx2x_check_mac_add(struct bnx2x *bp, |
642 | struct bnx2x_vlan_mac_obj *o, |
643 | union bnx2x_classification_ramrod_data *data) |
644 | { |
645 | struct bnx2x_vlan_mac_registry_elem *pos; |
646 | |
	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
650 | return -EINVAL; |
651 | |
652 | /* Check if a requested MAC already exists */ |
653 | list_for_each_entry(pos, &o->head, link) |
		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
655 | (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) |
656 | return -EEXIST; |
657 | |
658 | return 0; |
659 | } |
660 | |
661 | static int bnx2x_check_vlan_add(struct bnx2x *bp, |
662 | struct bnx2x_vlan_mac_obj *o, |
663 | union bnx2x_classification_ramrod_data *data) |
664 | { |
665 | struct bnx2x_vlan_mac_registry_elem *pos; |
666 | |
	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
668 | |
669 | list_for_each_entry(pos, &o->head, link) |
670 | if (data->vlan.vlan == pos->u.vlan.vlan) |
671 | return -EEXIST; |
672 | |
673 | return 0; |
674 | } |
675 | |
676 | static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, |
677 | struct bnx2x_vlan_mac_obj *o, |
678 | union bnx2x_classification_ramrod_data *data) |
679 | { |
680 | struct bnx2x_vlan_mac_registry_elem *pos; |
681 | |
	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
683 | data->vlan_mac.mac, data->vlan_mac.vlan); |
684 | |
685 | list_for_each_entry(pos, &o->head, link) |
686 | if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && |
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
688 | ETH_ALEN)) && |
689 | (data->vlan_mac.is_inner_mac == |
690 | pos->u.vlan_mac.is_inner_mac)) |
691 | return -EEXIST; |
692 | |
693 | return 0; |
694 | } |
695 | |
696 | /* check_del() callbacks */ |
697 | static struct bnx2x_vlan_mac_registry_elem * |
698 | bnx2x_check_mac_del(struct bnx2x *bp, |
699 | struct bnx2x_vlan_mac_obj *o, |
700 | union bnx2x_classification_ramrod_data *data) |
701 | { |
702 | struct bnx2x_vlan_mac_registry_elem *pos; |
703 | |
	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
705 | |
706 | list_for_each_entry(pos, &o->head, link) |
		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
708 | (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) |
709 | return pos; |
710 | |
711 | return NULL; |
712 | } |
713 | |
714 | static struct bnx2x_vlan_mac_registry_elem * |
715 | bnx2x_check_vlan_del(struct bnx2x *bp, |
716 | struct bnx2x_vlan_mac_obj *o, |
717 | union bnx2x_classification_ramrod_data *data) |
718 | { |
719 | struct bnx2x_vlan_mac_registry_elem *pos; |
720 | |
	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
722 | |
723 | list_for_each_entry(pos, &o->head, link) |
724 | if (data->vlan.vlan == pos->u.vlan.vlan) |
725 | return pos; |
726 | |
727 | return NULL; |
728 | } |
729 | |
730 | static struct bnx2x_vlan_mac_registry_elem * |
731 | bnx2x_check_vlan_mac_del(struct bnx2x *bp, |
732 | struct bnx2x_vlan_mac_obj *o, |
733 | union bnx2x_classification_ramrod_data *data) |
734 | { |
735 | struct bnx2x_vlan_mac_registry_elem *pos; |
736 | |
	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
738 | data->vlan_mac.mac, data->vlan_mac.vlan); |
739 | |
740 | list_for_each_entry(pos, &o->head, link) |
741 | if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && |
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
743 | ETH_ALEN)) && |
744 | (data->vlan_mac.is_inner_mac == |
745 | pos->u.vlan_mac.is_inner_mac)) |
746 | return pos; |
747 | |
748 | return NULL; |
749 | } |
750 | |
751 | /* check_move() callback */ |
752 | static bool bnx2x_check_move(struct bnx2x *bp, |
753 | struct bnx2x_vlan_mac_obj *src_o, |
754 | struct bnx2x_vlan_mac_obj *dst_o, |
755 | union bnx2x_classification_ramrod_data *data) |
756 | { |
757 | struct bnx2x_vlan_mac_registry_elem *pos; |
758 | int rc; |
759 | |
760 | /* Check if we can delete the requested configuration from the first |
761 | * object. |
762 | */ |
763 | pos = src_o->check_del(bp, src_o, data); |
764 | |
765 | /* check if configuration can be added */ |
766 | rc = dst_o->check_add(bp, dst_o, data); |
767 | |
768 | /* If this classification can not be added (is already set) |
769 | * or can't be deleted - return an error. |
770 | */ |
771 | if (rc || !pos) |
772 | return false; |
773 | |
774 | return true; |
775 | } |
776 | |
777 | static bool bnx2x_check_move_always_err( |
778 | struct bnx2x *bp, |
779 | struct bnx2x_vlan_mac_obj *src_o, |
780 | struct bnx2x_vlan_mac_obj *dst_o, |
781 | union bnx2x_classification_ramrod_data *data) |
782 | { |
783 | return false; |
784 | } |
785 | |
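/* Translate the object's RX/TX type into the RX/TX command flags expected in
 * an eth_classify_cmd_header.
 */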
786 | static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) |
787 | { |
788 | struct bnx2x_raw_obj *raw = &o->raw; |
789 | u8 rx_tx_flag = 0; |
790 | |
791 | if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || |
792 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) |
793 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; |
794 | |
795 | if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || |
796 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) |
797 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; |
798 | |
799 | return rx_tx_flag; |
800 | } |
801 | |
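/* Program (or clear) a MAC entry in the port's NIG LLH CAM. This is only
 * relevant in the switch-independent multi-function modes (MF_SI / MF_AFEX),
 * where the NIG performs MAC-based classification.
 */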
802 | static void bnx2x_set_mac_in_nig(struct bnx2x *bp, |
803 | bool add, unsigned char *dev_addr, int index) |
804 | { |
805 | u32 wb_data[2]; |
806 | u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : |
807 | NIG_REG_LLH0_FUNC_MEM; |
808 | |
809 | if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) |
810 | return; |
811 | |
812 | if (index > BNX2X_LLH_CAM_MAX_PF_LINE) |
813 | return; |
814 | |
	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);
817 | |
818 | if (add) { |
819 | /* LLH_FUNC_MEM is a u64 WB register */ |
820 | reg_offset += 8*index; |
821 | |
822 | wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | |
823 | (dev_addr[4] << 8) | dev_addr[5]); |
824 | wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); |
825 | |
826 | REG_WR_DMAE(bp, reg_offset, wb_data, 2); |
827 | } |
828 | |
829 | REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : |
830 | NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); |
831 | } |
832 | |
833 | /** |
834 | * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod |
835 | * |
836 | * @bp: device handle |
837 | * @o: queue for which we want to configure this rule |
838 | * @add: if true the command is an ADD command, DEL otherwise |
839 | * @opcode: CLASSIFY_RULE_OPCODE_XXX |
840 | * @hdr: pointer to a header to setup |
841 | * |
842 | */ |
843 | static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, |
844 | struct bnx2x_vlan_mac_obj *o, bool add, int opcode, |
845 | struct eth_classify_cmd_header *hdr) |
846 | { |
847 | struct bnx2x_raw_obj *raw = &o->raw; |
848 | |
849 | hdr->client_id = raw->cl_id; |
850 | hdr->func_id = raw->func_id; |
851 | |
852 | /* Rx or/and Tx (internal switching) configuration ? */ |
853 | hdr->cmd_general_data |= |
854 | bnx2x_vlan_mac_get_rx_tx_flag(o); |
855 | |
856 | if (add) |
857 | hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; |
858 | |
859 | hdr->cmd_general_data |= |
860 | (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); |
861 | } |
862 | |
863 | /** |
864 | * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header |
865 | * |
866 | * @cid: connection id |
867 | * @type: BNX2X_FILTER_XXX_PENDING |
868 | * @hdr: pointer to header to setup |
 * @rule_cnt:	number of rules in this ramrod data
 *
 * Currently we always configure one rule, and the echo field is set to
 * contain a CID and an opcode type.
873 | */ |
874 | static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, |
875 | struct eth_classify_header *hdr, int rule_cnt) |
876 | { |
877 | hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) | |
878 | (type << BNX2X_SWCID_SHIFT)); |
879 | hdr->rule_cnt = (u8)rule_cnt; |
880 | } |
881 | |
882 | /* hw_config() callbacks */ |
883 | static void bnx2x_set_one_mac_e2(struct bnx2x *bp, |
884 | struct bnx2x_vlan_mac_obj *o, |
885 | struct bnx2x_exeq_elem *elem, int rule_idx, |
886 | int cam_offset) |
887 | { |
888 | struct bnx2x_raw_obj *raw = &o->raw; |
889 | struct eth_classify_rules_ramrod_data *data = |
890 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
891 | int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; |
892 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
893 | bool add = cmd == BNX2X_VLAN_MAC_ADD; |
894 | unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; |
895 | u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; |
896 | |
897 | /* Set LLH CAM entry: currently only iSCSI and ETH macs are |
898 | * relevant. In addition, current implementation is tuned for a |
899 | * single ETH MAC. |
900 | * |
901 | * When multiple unicast ETH MACs PF configuration in switch |
902 | * independent mode is required (NetQ, multiple netdev MACs, |
903 | * etc.), consider better utilisation of 8 per function MAC |
904 | * entries in the LLH register. There is also |
905 | * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the |
906 | * total number of CAM entries to 16. |
907 | * |
908 | * Currently we won't configure NIG for MACs other than a primary ETH |
909 | * MAC and iSCSI L2 MAC. |
910 | * |
911 | * If this MAC is moving from one Queue to another, no need to change |
912 | * NIG configuration. |
913 | */ |
914 | if (cmd != BNX2X_VLAN_MAC_MOVE) { |
915 | if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) |
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
921 | } |
922 | |
923 | /* Reset the ramrod data buffer for the first rule */ |
924 | if (rule_idx == 0) |
925 | memset(data, 0, sizeof(*data)); |
926 | |
927 | /* Setup a command header */ |
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);
930 | |
	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);
933 | |
934 | /* Set a MAC itself */ |
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
938 | rule_entry->mac.inner_mac = |
939 | cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac); |
940 | |
941 | /* MOVE: Add a rule that will add this MAC to the target Queue */ |
942 | if (cmd == BNX2X_VLAN_MAC_MOVE) { |
943 | rule_entry++; |
944 | rule_cnt++; |
945 | |
946 | /* Setup ramrod data */ |
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					      elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);
951 | |
952 | /* Set a MAC itself */ |
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
956 | rule_entry->mac.inner_mac = |
957 | cpu_to_le16(elem->cmd_data.vlan_mac. |
958 | u.mac.is_inner_mac); |
959 | } |
960 | |
961 | /* Set the ramrod data header */ |
962 | /* TODO: take this to the higher level in order to prevent multiple |
963 | writing */ |
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
965 | rule_cnt); |
966 | } |
967 | |
968 | /** |
969 | * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod |
970 | * |
971 | * @bp: device handle |
972 | * @o: queue |
973 | * @type: the type of echo |
974 | * @cam_offset: offset in cam memory |
975 | * @hdr: pointer to a header to setup |
976 | * |
977 | * E1/E1H |
978 | */ |
979 | static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, |
980 | struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, |
981 | struct mac_configuration_hdr *hdr) |
982 | { |
983 | struct bnx2x_raw_obj *r = &o->raw; |
984 | |
985 | hdr->length = 1; |
986 | hdr->offset = (u8)cam_offset; |
987 | hdr->client_id = cpu_to_le16(0xff); |
988 | hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | |
989 | (type << BNX2X_SWCID_SHIFT)); |
990 | } |
991 | |
992 | static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, |
993 | struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, |
994 | u16 vlan_id, struct mac_configuration_entry *cfg_entry) |
995 | { |
996 | struct bnx2x_raw_obj *r = &o->raw; |
997 | u32 cl_bit_vec = (1 << r->cl_id); |
998 | |
999 | cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); |
1000 | cfg_entry->pf_id = r->func_id; |
1001 | cfg_entry->vlan_id = cpu_to_le16(vlan_id); |
1002 | |
1003 | if (add) { |
1004 | SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
1005 | T_ETH_MAC_COMMAND_SET); |
1006 | SET_FLAG(cfg_entry->flags, |
1007 | MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); |
1008 | |
1009 | /* Set a MAC in a ramrod data */ |
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
1013 | } else |
1014 | SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
1015 | T_ETH_MAC_COMMAND_INVALIDATE); |
1016 | } |
1017 | |
1018 | static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, |
1019 | struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, |
1020 | u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) |
1021 | { |
1022 | struct mac_configuration_entry *cfg_entry = &config->config_table[0]; |
1023 | struct bnx2x_raw_obj *raw = &o->raw; |
1024 | |
	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
1027 | bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, |
1028 | cfg_entry); |
1029 | |
	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   (add ? "setting" : "clearing"),
	   mac, raw->cl_id, cam_offset);
1033 | } |
1034 | |
1035 | /** |
1036 | * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data |
1037 | * |
1038 | * @bp: device handle |
1039 | * @o: bnx2x_vlan_mac_obj |
1040 | * @elem: bnx2x_exeq_elem |
1041 | * @rule_idx: rule_idx |
1042 | * @cam_offset: cam_offset |
1043 | */ |
1044 | static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, |
1045 | struct bnx2x_vlan_mac_obj *o, |
1046 | struct bnx2x_exeq_elem *elem, int rule_idx, |
1047 | int cam_offset) |
1048 | { |
1049 | struct bnx2x_raw_obj *raw = &o->raw; |
1050 | struct mac_configuration_cmd *config = |
1051 | (struct mac_configuration_cmd *)(raw->rdata); |
1052 | /* 57710 and 57711 do not support MOVE command, |
1053 | * so it's either ADD or DEL |
1054 | */ |
1055 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? |
1056 | true : false; |
1057 | |
1058 | /* Reset the ramrod data buffer */ |
1059 | memset(config, 0, sizeof(*config)); |
1060 | |
	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
1065 | } |
1066 | |
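/* Fill a single VLAN classification rule (E2 and newer); for a MOVE command a
 * second rule that adds the VLAN on the target queue is appended as well.
 */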
1067 | static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, |
1068 | struct bnx2x_vlan_mac_obj *o, |
1069 | struct bnx2x_exeq_elem *elem, int rule_idx, |
1070 | int cam_offset) |
1071 | { |
1072 | struct bnx2x_raw_obj *raw = &o->raw; |
1073 | struct eth_classify_rules_ramrod_data *data = |
1074 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
1075 | int rule_cnt = rule_idx + 1; |
1076 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
1077 | enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; |
1078 | bool add = cmd == BNX2X_VLAN_MAC_ADD; |
1079 | u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; |
1080 | |
1081 | /* Reset the ramrod data buffer for the first rule */ |
1082 | if (rule_idx == 0) |
1083 | memset(data, 0, sizeof(*data)); |
1084 | |
1085 | /* Set a rule header */ |
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);
1088 | |
	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);
1091 | |
1092 | /* Set a VLAN itself */ |
1093 | rule_entry->vlan.vlan = cpu_to_le16(vlan); |
1094 | |
1095 | /* MOVE: Add a rule that will add this MAC to the target Queue */ |
1096 | if (cmd == BNX2X_VLAN_MAC_MOVE) { |
1097 | rule_entry++; |
1098 | rule_cnt++; |
1099 | |
1100 | /* Setup ramrod data */ |
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					      elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);
1105 | |
1106 | /* Set a VLAN itself */ |
1107 | rule_entry->vlan.vlan = cpu_to_le16(vlan); |
1108 | } |
1109 | |
1110 | /* Set the ramrod data header */ |
1111 | /* TODO: take this to the higher level in order to prevent multiple |
1112 | writing */ |
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1114 | rule_cnt); |
1115 | } |
1116 | |
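/* Fill a single VLAN-MAC pair classification rule (E2 and newer); for a MOVE
 * command a second rule that adds the pair on the target queue is appended.
 */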
1117 | static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, |
1118 | struct bnx2x_vlan_mac_obj *o, |
1119 | struct bnx2x_exeq_elem *elem, |
1120 | int rule_idx, int cam_offset) |
1121 | { |
1122 | struct bnx2x_raw_obj *raw = &o->raw; |
1123 | struct eth_classify_rules_ramrod_data *data = |
1124 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
1125 | int rule_cnt = rule_idx + 1; |
1126 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
1127 | enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; |
1128 | bool add = cmd == BNX2X_VLAN_MAC_ADD; |
1129 | u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; |
1130 | u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; |
1131 | u16 inner_mac; |
1132 | |
1133 | /* Reset the ramrod data buffer for the first rule */ |
1134 | if (rule_idx == 0) |
1135 | memset(data, 0, sizeof(*data)); |
1136 | |
1137 | /* Set a rule header */ |
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);
1140 | |
1141 | /* Set VLAN and MAC themselves */ |
1142 | rule_entry->pair.vlan = cpu_to_le16(vlan); |
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
1146 | inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; |
1147 | rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); |
1148 | /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */ |
1149 | if (cmd == BNX2X_VLAN_MAC_MOVE) { |
1150 | struct bnx2x_vlan_mac_obj *target_obj; |
1151 | |
1152 | rule_entry++; |
1153 | rule_cnt++; |
1154 | |
1155 | /* Setup ramrod data */ |
1156 | target_obj = elem->cmd_data.vlan_mac.target_obj; |
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);
1160 | |
1161 | /* Set a VLAN itself */ |
1162 | rule_entry->pair.vlan = cpu_to_le16(vlan); |
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
1166 | rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); |
1167 | } |
1168 | |
1169 | /* Set the ramrod data header */ |
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1171 | rule_cnt); |
1172 | } |
1173 | |
1174 | /** |
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
1176 | * |
1177 | * @bp: device handle |
1178 | * @o: bnx2x_vlan_mac_obj |
1179 | * @elem: bnx2x_exeq_elem |
1180 | * @rule_idx: rule_idx |
1181 | * @cam_offset: cam_offset |
1182 | */ |
1183 | static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, |
1184 | struct bnx2x_vlan_mac_obj *o, |
1185 | struct bnx2x_exeq_elem *elem, |
1186 | int rule_idx, int cam_offset) |
1187 | { |
1188 | struct bnx2x_raw_obj *raw = &o->raw; |
1189 | struct mac_configuration_cmd *config = |
1190 | (struct mac_configuration_cmd *)(raw->rdata); |
1191 | /* 57710 and 57711 do not support MOVE command, |
1192 | * so it's either ADD or DEL |
1193 | */ |
1194 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? |
1195 | true : false; |
1196 | |
1197 | /* Reset the ramrod data buffer */ |
1198 | memset(config, 0, sizeof(*config)); |
1199 | |
	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
1205 | } |
1206 | |
1207 | /** |
1208 | * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element |
1209 | * |
1210 | * @bp: device handle |
1211 | * @p: command parameters |
1212 | * @ppos: pointer to the cookie |
1213 | * |
1214 | * reconfigure next MAC/VLAN/VLAN-MAC element from the |
1215 | * previously configured elements list. |
1216 | * |
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
 * taken into account.
 *
 * The cookie should be given back in the next call to make the function handle
 * the next element. If *ppos is set to NULL the iterator restarts from the
 * beginning; if the returned *ppos is NULL, the last element has been handled.
1224 | * |
1225 | */ |
1226 | static int bnx2x_vlan_mac_restore(struct bnx2x *bp, |
1227 | struct bnx2x_vlan_mac_ramrod_params *p, |
1228 | struct bnx2x_vlan_mac_registry_elem **ppos) |
1229 | { |
1230 | struct bnx2x_vlan_mac_registry_elem *pos; |
1231 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; |
1232 | |
1233 | /* If list is empty - there is nothing to do here */ |
	if (list_empty(&o->head)) {
1235 | *ppos = NULL; |
1236 | return 0; |
1237 | } |
1238 | |
1239 | /* make a step... */ |
1240 | if (*ppos == NULL) |
1241 | *ppos = list_first_entry(&o->head, |
1242 | struct bnx2x_vlan_mac_registry_elem, |
1243 | link); |
1244 | else |
1245 | *ppos = list_next_entry(*ppos, link); |
1246 | |
1247 | pos = *ppos; |
1248 | |
1249 | /* If it's the last step - return NULL */ |
	if (list_is_last(&pos->link, &o->head))
1251 | *ppos = NULL; |
1252 | |
1253 | /* Prepare a 'user_req' */ |
1254 | memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); |
1255 | |
1256 | /* Set the command */ |
1257 | p->user_req.cmd = BNX2X_VLAN_MAC_ADD; |
1258 | |
1259 | /* Set vlan_mac_flags */ |
1260 | p->user_req.vlan_mac_flags = pos->vlan_mac_flags; |
1261 | |
1262 | /* Set a restore bit */ |
1263 | __set_bit(RAMROD_RESTORE, &p->ramrod_flags); |
1264 | |
1265 | return bnx2x_config_vlan_mac(bp, p); |
1266 | } |
1267 | |
1268 | /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a |
1269 | * pointer to an element with a specific criteria and NULL if such an element |
1270 | * hasn't been found. |
1271 | */ |
1272 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( |
1273 | struct bnx2x_exe_queue_obj *o, |
1274 | struct bnx2x_exeq_elem *elem) |
1275 | { |
1276 | struct bnx2x_exeq_elem *pos; |
1277 | struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; |
1278 | |
1279 | /* Check pending for execution commands */ |
1280 | list_for_each_entry(pos, &o->exe_queue, link) |
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
1283 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) |
1284 | return pos; |
1285 | |
1286 | return NULL; |
1287 | } |
1288 | |
1289 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( |
1290 | struct bnx2x_exe_queue_obj *o, |
1291 | struct bnx2x_exeq_elem *elem) |
1292 | { |
1293 | struct bnx2x_exeq_elem *pos; |
1294 | struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; |
1295 | |
1296 | /* Check pending for execution commands */ |
1297 | list_for_each_entry(pos, &o->exe_queue, link) |
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
1300 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) |
1301 | return pos; |
1302 | |
1303 | return NULL; |
1304 | } |
1305 | |
1306 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( |
1307 | struct bnx2x_exe_queue_obj *o, |
1308 | struct bnx2x_exeq_elem *elem) |
1309 | { |
1310 | struct bnx2x_exeq_elem *pos; |
1311 | struct bnx2x_vlan_mac_ramrod_data *data = |
1312 | &elem->cmd_data.vlan_mac.u.vlan_mac; |
1313 | |
1314 | /* Check pending for execution commands */ |
1315 | list_for_each_entry(pos, &o->exe_queue, link) |
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
1318 | (pos->cmd_data.vlan_mac.cmd == |
1319 | elem->cmd_data.vlan_mac.cmd)) |
1320 | return pos; |
1321 | |
1322 | return NULL; |
1323 | } |
1324 | |
1325 | /** |
1326 | * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed |
1327 | * |
1328 | * @bp: device handle |
1329 | * @qo: bnx2x_qable_obj |
1330 | * @elem: bnx2x_exeq_elem |
1331 | * |
1332 | * Checks that the requested configuration can be added. If yes and if |
1333 | * requested, consume CAM credit. |
1334 | * |
1335 | * The 'validate' is run after the 'optimize'. |
1336 | * |
1337 | */ |
1338 | static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, |
1339 | union bnx2x_qable_obj *qo, |
1340 | struct bnx2x_exeq_elem *elem) |
1341 | { |
1342 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; |
1343 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
1344 | int rc; |
1345 | |
1346 | /* Check the registry */ |
1347 | rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); |
1348 | if (rc) { |
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1350 | return rc; |
1351 | } |
1352 | |
1353 | /* Check if there is a pending ADD command for this |
1354 | * MAC/VLAN/VLAN-MAC. Return an error if there is. |
1355 | */ |
1356 | if (exeq->get(exeq, elem)) { |
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1358 | return -EEXIST; |
1359 | } |
1360 | |
1361 | /* TODO: Check the pending MOVE from other objects where this |
1362 | * object is a destination object. |
1363 | */ |
1364 | |
	/* Consume the credit unless we were asked not to */
1366 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, |
1367 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
1368 | o->get_credit(o))) |
1369 | return -EINVAL; |
1370 | |
1371 | return 0; |
1372 | } |
1373 | |
1374 | /** |
1375 | * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed |
1376 | * |
1377 | * @bp: device handle |
1378 | * @qo: quable object to check |
1379 | * @elem: element that needs to be deleted |
1380 | * |
1381 | * Checks that the requested configuration can be deleted. If yes and if |
1382 | * requested, returns a CAM credit. |
1383 | * |
1384 | * The 'validate' is run after the 'optimize'. |
1385 | */ |
1386 | static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, |
1387 | union bnx2x_qable_obj *qo, |
1388 | struct bnx2x_exeq_elem *elem) |
1389 | { |
1390 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; |
1391 | struct bnx2x_vlan_mac_registry_elem *pos; |
1392 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
1393 | struct bnx2x_exeq_elem query_elem; |
1394 | |
1395 | /* If this classification can not be deleted (doesn't exist) |
	 * - return -EEXIST.
1397 | */ |
1398 | pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); |
1399 | if (!pos) { |
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1401 | return -EEXIST; |
1402 | } |
1403 | |
1404 | /* Check if there are pending DEL or MOVE commands for this |
1405 | * MAC/VLAN/VLAN-MAC. Return an error if so. |
1406 | */ |
1407 | memcpy(&query_elem, elem, sizeof(query_elem)); |
1408 | |
1409 | /* Check for MOVE commands */ |
1410 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; |
1411 | if (exeq->get(exeq, &query_elem)) { |
		BNX2X_ERR("There is a pending MOVE command already\n");
1413 | return -EINVAL; |
1414 | } |
1415 | |
1416 | /* Check for DEL commands */ |
1417 | if (exeq->get(exeq, elem)) { |
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1419 | return -EEXIST; |
1420 | } |
1421 | |
	/* Return the credit to the credit pool unless we were asked not to */
1423 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, |
1424 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
1425 | o->put_credit(o))) { |
		BNX2X_ERR("Failed to return a credit\n");
1427 | return -EINVAL; |
1428 | } |
1429 | |
1430 | return 0; |
1431 | } |
1432 | |
1433 | /** |
1434 | * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed |
1435 | * |
1436 | * @bp: device handle |
1437 | * @qo: quable object to check (source) |
1438 | * @elem: element that needs to be moved |
1439 | * |
1440 | * Checks that the requested configuration can be moved. If yes and if |
1441 | * requested, returns a CAM credit. |
1442 | * |
1443 | * The 'validate' is run after the 'optimize'. |
1444 | */ |
1445 | static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, |
1446 | union bnx2x_qable_obj *qo, |
1447 | struct bnx2x_exeq_elem *elem) |
1448 | { |
1449 | struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; |
1450 | struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; |
1451 | struct bnx2x_exeq_elem query_elem; |
1452 | struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; |
1453 | struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; |
1454 | |
1455 | /* Check if we can perform this operation based on the current registry |
1456 | * state. |
1457 | */ |
1458 | if (!src_o->check_move(bp, src_o, dest_o, |
1459 | &elem->cmd_data.vlan_mac.u)) { |
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1461 | return -EINVAL; |
1462 | } |
1463 | |
1464 | /* Check if there is an already pending DEL or MOVE command for the |
1465 | * source object or ADD command for a destination object. Return an |
1466 | * error if so. |
1467 | */ |
1468 | memcpy(&query_elem, elem, sizeof(query_elem)); |
1469 | |
1470 | /* Check DEL on source */ |
1471 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; |
1472 | if (src_exeq->get(src_exeq, &query_elem)) { |
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1474 | return -EINVAL; |
1475 | } |
1476 | |
1477 | /* Check MOVE on source */ |
1478 | if (src_exeq->get(src_exeq, elem)) { |
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1480 | return -EEXIST; |
1481 | } |
1482 | |
1483 | /* Check ADD on destination */ |
1484 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; |
1485 | if (dest_exeq->get(dest_exeq, &query_elem)) { |
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1487 | return -EINVAL; |
1488 | } |
1489 | |
	/* Consume the credit unless we were asked not to */
1491 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, |
1492 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
1493 | dest_o->get_credit(dest_o))) |
1494 | return -EINVAL; |
1495 | |
1496 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, |
1497 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || |
1498 | src_o->put_credit(src_o))) { |
1499 | /* return the credit taken from dest... */ |
1500 | dest_o->put_credit(dest_o); |
1501 | return -EINVAL; |
1502 | } |
1503 | |
1504 | return 0; |
1505 | } |
1506 | |
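/* Dispatch the 'validate' step to the ADD/DEL/MOVE specific checker according
 * to the command carried by the execution queue element.
 */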
1507 | static int bnx2x_validate_vlan_mac(struct bnx2x *bp, |
1508 | union bnx2x_qable_obj *qo, |
1509 | struct bnx2x_exeq_elem *elem) |
1510 | { |
1511 | switch (elem->cmd_data.vlan_mac.cmd) { |
1512 | case BNX2X_VLAN_MAC_ADD: |
1513 | return bnx2x_validate_vlan_mac_add(bp, qo, elem); |
1514 | case BNX2X_VLAN_MAC_DEL: |
1515 | return bnx2x_validate_vlan_mac_del(bp, qo, elem); |
1516 | case BNX2X_VLAN_MAC_MOVE: |
1517 | return bnx2x_validate_vlan_mac_move(bp, qo, elem); |
1518 | default: |
1519 | return -EINVAL; |
1520 | } |
1521 | } |
1522 | |
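/* Undo the credit accounting done by the 'validate' step when an element is
 * removed from the execution queue without being executed: return the credit
 * taken for ADD/MOVE, or re-take the credit released for DEL.
 */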
1523 | static int bnx2x_remove_vlan_mac(struct bnx2x *bp, |
1524 | union bnx2x_qable_obj *qo, |
1525 | struct bnx2x_exeq_elem *elem) |
1526 | { |
1527 | int rc = 0; |
1528 | |
1529 | /* If consumption wasn't required, nothing to do */ |
1530 | if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, |
1531 | &elem->cmd_data.vlan_mac.vlan_mac_flags)) |
1532 | return 0; |
1533 | |
1534 | switch (elem->cmd_data.vlan_mac.cmd) { |
1535 | case BNX2X_VLAN_MAC_ADD: |
1536 | case BNX2X_VLAN_MAC_MOVE: |
1537 | rc = qo->vlan_mac.put_credit(&qo->vlan_mac); |
1538 | break; |
1539 | case BNX2X_VLAN_MAC_DEL: |
1540 | rc = qo->vlan_mac.get_credit(&qo->vlan_mac); |
1541 | break; |
1542 | default: |
1543 | return -EINVAL; |
1544 | } |
1545 | |
1546 | if (rc != true) |
1547 | return -EINVAL; |
1548 | |
1549 | return 0; |
1550 | } |
1551 | |
1552 | /** |
1553 | * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. |
1554 | * |
1555 | * @bp: device handle |
1556 | * @o: bnx2x_vlan_mac_obj |
1557 | * |
1558 | */ |
1559 | static int bnx2x_wait_vlan_mac(struct bnx2x *bp, |
1560 | struct bnx2x_vlan_mac_obj *o) |
1561 | { |
1562 | int cnt = 5000, rc; |
1563 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
1564 | struct bnx2x_raw_obj *raw = &o->raw; |
1565 | |
1566 | while (cnt--) { |
1567 | /* Wait for the current command to complete */ |
1568 | rc = raw->wait_comp(bp, raw); |
1569 | if (rc) |
1570 | return rc; |
1571 | |
1572 | /* Wait until there are no pending commands */ |
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
1575 | else |
1576 | return 0; |
1577 | } |
1578 | |
1579 | return -EBUSY; |
1580 | } |
1581 | |
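/* Try to take the vlan_mac writer lock and run one execution chunk. If readers
 * currently hold the head list, the request is saved and will be executed once
 * the last reader releases the lock; in that case 1 is returned, just as if a
 * ramrod were already pending.
 */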
1582 | static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, |
1583 | struct bnx2x_vlan_mac_obj *o, |
1584 | unsigned long *ramrod_flags) |
1585 | { |
1586 | int rc = 0; |
1587 | |
	spin_lock_bh(&o->exe_queue.lock);

	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1591 | rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); |
1592 | |
1593 | if (rc != 0) { |
		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1595 | |
1596 | /* Calling function should not differentiate between this case |
1597 | * and the case in which there is already a pending ramrod |
1598 | */ |
1599 | rc = 1; |
1600 | } else { |
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1602 | } |
	spin_unlock_bh(&o->exe_queue.lock);
1604 | |
1605 | return rc; |
1606 | } |
1607 | |
1608 | /** |
1609 | * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod |
1610 | * |
1611 | * @bp: device handle |
1612 | * @o: bnx2x_vlan_mac_obj |
1613 | * @cqe: completion element |
1614 | * @ramrod_flags: if set schedule next execution chunk |
1615 | * |
1616 | */ |
1617 | static int bnx2x_complete_vlan_mac(struct bnx2x *bp, |
1618 | struct bnx2x_vlan_mac_obj *o, |
1619 | union event_ring_elem *cqe, |
1620 | unsigned long *ramrod_flags) |
1621 | { |
1622 | struct bnx2x_raw_obj *r = &o->raw; |
1623 | int rc; |
1624 | |
1625 | /* Clearing the pending list & raw state should be made |
1626 | * atomically (as execution flow assumes they represent the same). |
1627 | */ |
	spin_lock_bh(&o->exe_queue.lock);
1629 | |
1630 | /* Reset pending list */ |
	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1632 | |
1633 | /* Clear pending */ |
1634 | r->clear_pending(r); |
1635 | |
	spin_unlock_bh(&o->exe_queue.lock);
1637 | |
1638 | /* If ramrod failed this is most likely a SW bug */ |
1639 | if (cqe->message.error) |
1640 | return -EINVAL; |
1641 | |
1642 | /* Run the next bulk of pending commands if requested */ |
1643 | if (test_bit(RAMROD_CONT, ramrod_flags)) { |
1644 | rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); |
1645 | |
1646 | if (rc < 0) |
1647 | return rc; |
1648 | } |
1649 | |
1650 | /* If there is more work to do return PENDING */ |
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1652 | return 1; |
1653 | |
1654 | return 0; |
1655 | } |
1656 | |
/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * If the execution queue already holds a pending command of the opposite type
 * (ADD vs. DEL) for the same address, the two commands cancel each other out:
 * the queued command is removed, its CAM credit is adjusted accordingly and
 * the new command is dropped by the caller.
 */
1664 | static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, |
1665 | union bnx2x_qable_obj *qo, |
1666 | struct bnx2x_exeq_elem *elem) |
1667 | { |
1668 | struct bnx2x_exeq_elem query, *pos; |
1669 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; |
1670 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
1671 | |
1672 | memcpy(&query, elem, sizeof(query)); |
1673 | |
1674 | switch (elem->cmd_data.vlan_mac.cmd) { |
1675 | case BNX2X_VLAN_MAC_ADD: |
1676 | query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; |
1677 | break; |
1678 | case BNX2X_VLAN_MAC_DEL: |
1679 | query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; |
1680 | break; |
1681 | default: |
1682 | /* Don't handle anything other than ADD or DEL */ |
1683 | return 0; |
1684 | } |
1685 | |
1686 | /* If we found the appropriate element - delete it */ |
1687 | pos = exeq->get(exeq, &query); |
1688 | if (pos) { |
1689 | |
1690 | /* Return the credit of the optimized command */ |
1691 | if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, |
1692 | &pos->cmd_data.vlan_mac.vlan_mac_flags)) { |
1693 | if ((query.cmd_data.vlan_mac.cmd == |
1694 | BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { |
1695 | BNX2X_ERR("Failed to return the credit for the optimized ADD command\n" ); |
1696 | return -EINVAL; |
1697 | } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ |
1698 | BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n" ); |
1699 | return -EINVAL; |
1700 | } |
1701 | } |
1702 | |
1703 | DP(BNX2X_MSG_SP, "Optimizing %s command\n" , |
1704 | (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? |
1705 | "ADD" : "DEL" ); |
1706 | |
1707 | list_del(entry: &pos->link); |
1708 | bnx2x_exe_queue_free_elem(bp, elem: pos); |
1709 | return 1; |
1710 | } |
1711 | |
1712 | return 0; |
1713 | } |
1714 | |
1715 | /** |
1716 | * bnx2x_vlan_mac_get_registry_elem - prepare a registry element |
1717 | * |
1718 | * @bp: device handle |
1719 | * @o: vlan object |
1720 | * @elem: element |
1721 | * @restore: to restore or not |
1722 | * @re: registry |
1723 | * |
1724 | * prepare a registry element according to the current command request. |
1725 | */ |
1726 | static inline int bnx2x_vlan_mac_get_registry_elem( |
1727 | struct bnx2x *bp, |
1728 | struct bnx2x_vlan_mac_obj *o, |
1729 | struct bnx2x_exeq_elem *elem, |
1730 | bool restore, |
1731 | struct bnx2x_vlan_mac_registry_elem **re) |
1732 | { |
1733 | enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; |
1734 | struct bnx2x_vlan_mac_registry_elem *reg_elem; |
1735 | |
1736 | /* Allocate a new registry element if needed. */ |
1737 | if (!restore && |
1738 | ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { |
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1740 | if (!reg_elem) |
1741 | return -ENOMEM; |
1742 | |
1743 | /* Get a new CAM offset */ |
1744 | if (!o->get_cam_offset(o, ®_elem->cam_offset)) { |
1745 | /* This shall never happen, because we have checked the |
1746 | * CAM availability in the 'validate'. |
1747 | */ |
1748 | WARN_ON(1); |
			kfree(reg_elem);
1750 | return -EINVAL; |
1751 | } |
1752 | |
1753 | DP(BNX2X_MSG_SP, "Got cam offset %d\n" , reg_elem->cam_offset); |
1754 | |
1755 | /* Set a VLAN-MAC data */ |
1756 | memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, |
1757 | sizeof(reg_elem->u)); |
1758 | |
1759 | /* Copy the flags (needed for DEL and RESTORE flows) */ |
1760 | reg_elem->vlan_mac_flags = |
1761 | elem->cmd_data.vlan_mac.vlan_mac_flags; |
1762 | } else /* DEL, RESTORE */ |
1763 | reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); |
1764 | |
1765 | *re = reg_elem; |
1766 | return 0; |
1767 | } |
1768 | |
1769 | /** |
1770 | * bnx2x_execute_vlan_mac - execute vlan mac command |
1771 | * |
1772 | * @bp: device handle |
1773 | * @qo: bnx2x_qable_obj pointer |
1774 | * @exe_chunk: chunk |
1775 | * @ramrod_flags: flags |
1776 | * |
1777 | * go and send a ramrod! |
1778 | */ |
1779 | static int bnx2x_execute_vlan_mac(struct bnx2x *bp, |
1780 | union bnx2x_qable_obj *qo, |
1781 | struct list_head *exe_chunk, |
1782 | unsigned long *ramrod_flags) |
1783 | { |
1784 | struct bnx2x_exeq_elem *elem; |
1785 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; |
1786 | struct bnx2x_raw_obj *r = &o->raw; |
1787 | int rc, idx = 0; |
1788 | bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); |
1789 | bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); |
1790 | struct bnx2x_vlan_mac_registry_elem *reg_elem; |
1791 | enum bnx2x_vlan_mac_cmd cmd; |
1792 | |
1793 | /* If DRIVER_ONLY execution is requested, cleanup a registry |
1794 | * and exit. Otherwise send a ramrod to FW. |
1795 | */ |
1796 | if (!drv_only) { |
1797 | WARN_ON(r->check_pending(r)); |
1798 | |
1799 | /* Set pending */ |
1800 | r->set_pending(r); |
1801 | |
1802 | /* Fill the ramrod data */ |
1803 | list_for_each_entry(elem, exe_chunk, link) { |
1804 | cmd = elem->cmd_data.vlan_mac.cmd; |
1805 | /* We will add to the target object in MOVE command, so |
1806 | * change the object for a CAM search. |
1807 | */ |
1808 | if (cmd == BNX2X_VLAN_MAC_MOVE) |
1809 | cam_obj = elem->cmd_data.vlan_mac.target_obj; |
1810 | else |
1811 | cam_obj = o; |
1812 | |
			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
1816 | if (rc) |
1817 | goto error_exit; |
1818 | |
1819 | WARN_ON(!reg_elem); |
1820 | |
1821 | /* Push a new entry into the registry */ |
1822 | if (!restore && |
1823 | ((cmd == BNX2X_VLAN_MAC_ADD) || |
1824 | (cmd == BNX2X_VLAN_MAC_MOVE))) |
				list_add(&reg_elem->link, &cam_obj->head);
1826 | |
1827 | /* Configure a single command in a ramrod data buffer */ |
1828 | o->set_one_rule(bp, o, elem, idx, |
1829 | reg_elem->cam_offset); |
1830 | |
1831 | /* MOVE command consumes 2 entries in the ramrod data */ |
1832 | if (cmd == BNX2X_VLAN_MAC_MOVE) |
1833 | idx += 2; |
1834 | else |
1835 | idx++; |
1836 | } |
1837 | |
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside bnx2x_sp_post()).
		 */
1844 | |
		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
1849 | if (rc) |
1850 | goto error_exit; |
1851 | } |
1852 | |
1853 | /* Now, when we are done with the ramrod - clean up the registry */ |
1854 | list_for_each_entry(elem, exe_chunk, link) { |
1855 | cmd = elem->cmd_data.vlan_mac.cmd; |
1856 | if ((cmd == BNX2X_VLAN_MAC_DEL) || |
1857 | (cmd == BNX2X_VLAN_MAC_MOVE)) { |
1858 | reg_elem = o->check_del(bp, o, |
1859 | &elem->cmd_data.vlan_mac.u); |
1860 | |
1861 | WARN_ON(!reg_elem); |
1862 | |
1863 | o->put_cam_offset(o, reg_elem->cam_offset); |
			list_del(&reg_elem->link);
			kfree(reg_elem);
1866 | } |
1867 | } |
1868 | |
1869 | if (!drv_only) |
1870 | return 1; |
1871 | else |
1872 | return 0; |
1873 | |
1874 | error_exit: |
1875 | r->clear_pending(r); |
1876 | |
1877 | /* Cleanup a registry in case of a failure */ |
1878 | list_for_each_entry(elem, exe_chunk, link) { |
1879 | cmd = elem->cmd_data.vlan_mac.cmd; |
1880 | |
1881 | if (cmd == BNX2X_VLAN_MAC_MOVE) |
1882 | cam_obj = elem->cmd_data.vlan_mac.target_obj; |
1883 | else |
1884 | cam_obj = o; |
1885 | |
1886 | /* Delete all newly added above entries */ |
1887 | if (!restore && |
1888 | ((cmd == BNX2X_VLAN_MAC_ADD) || |
1889 | (cmd == BNX2X_VLAN_MAC_MOVE))) { |
1890 | reg_elem = o->check_del(bp, cam_obj, |
1891 | &elem->cmd_data.vlan_mac.u); |
1892 | if (reg_elem) { |
				list_del(&reg_elem->link);
				kfree(reg_elem);
1895 | } |
1896 | } |
1897 | } |
1898 | |
1899 | return rc; |
1900 | } |
1901 | |
1902 | static inline int bnx2x_vlan_mac_push_new_cmd( |
1903 | struct bnx2x *bp, |
1904 | struct bnx2x_vlan_mac_ramrod_params *p) |
1905 | { |
1906 | struct bnx2x_exeq_elem *elem; |
1907 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; |
1908 | bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); |
1909 | |
1910 | /* Allocate the execution queue element */ |
1911 | elem = bnx2x_exe_queue_alloc_elem(bp); |
1912 | if (!elem) |
1913 | return -ENOMEM; |
1914 | |
1915 | /* Set the command 'length' */ |
1916 | switch (p->user_req.cmd) { |
1917 | case BNX2X_VLAN_MAC_MOVE: |
1918 | elem->cmd_len = 2; |
1919 | break; |
1920 | default: |
1921 | elem->cmd_len = 1; |
1922 | } |
1923 | |
1924 | /* Fill the object specific info */ |
1925 | memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); |
1926 | |
1927 | /* Try to add a new command to the pending list */ |
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1929 | } |
1930 | |
1931 | /** |
1932 | * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. |
1933 | * |
1934 | * @bp: device handle |
 * @p: vlan_mac ramrod parameters (object, command and its arguments)
1936 | * |
1937 | */ |
1938 | int bnx2x_config_vlan_mac(struct bnx2x *bp, |
1939 | struct bnx2x_vlan_mac_ramrod_params *p) |
1940 | { |
1941 | int rc = 0; |
1942 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; |
1943 | unsigned long *ramrod_flags = &p->ramrod_flags; |
1944 | bool cont = test_bit(RAMROD_CONT, ramrod_flags); |
1945 | struct bnx2x_raw_obj *raw = &o->raw; |
1946 | |
1947 | /* |
1948 | * Add new elements to the execution list for commands that require it. |
1949 | */ |
1950 | if (!cont) { |
1951 | rc = bnx2x_vlan_mac_push_new_cmd(bp, p); |
1952 | if (rc) |
1953 | return rc; |
1954 | } |
1955 | |
1956 | /* If nothing will be executed further in this iteration we want to |
1957 | * return PENDING if there are pending commands |
1958 | */ |
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1960 | rc = 1; |
1961 | |
1962 | if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { |
1963 | DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n" ); |
1964 | raw->clear_pending(raw); |
1965 | } |
1966 | |
1967 | /* Execute commands if required */ |
1968 | if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || |
1969 | test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { |
		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
						   &p->ramrod_flags);
1972 | if (rc < 0) |
1973 | return rc; |
1974 | } |
1975 | |
	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
1979 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { |
1980 | /* Wait maximum for the current exe_queue length iterations plus |
1981 | * one (for the current pending command). |
1982 | */ |
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1984 | |
		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1986 | max_iterations--) { |
1987 | |
1988 | /* Wait for the current command to complete */ |
1989 | rc = raw->wait_comp(bp, raw); |
1990 | if (rc) |
1991 | return rc; |
1992 | |
1993 | /* Make a next step */ |
			rc = __bnx2x_vlan_mac_execute_step(bp,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
1997 | if (rc < 0) |
1998 | return rc; |
1999 | } |
2000 | |
2001 | return 0; |
2002 | } |
2003 | |
2004 | return rc; |
2005 | } |
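
/* Illustrative usage sketch (not part of the driver flow): a typical caller
 * fills a bnx2x_vlan_mac_ramrod_params on the stack and lets
 * bnx2x_config_vlan_mac() queue and, optionally, execute the command:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p = {0};
 *
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 *
 * 'mac_obj' and 'addr' are caller-supplied placeholders, and the u.mac.mac
 * member layout is assumed per bnx2x_sp.h; the remaining field names are the
 * ones used by the functions above.
 */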
2006 | |
2007 | /** |
2008 | * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec |
2009 | * |
2010 | * @bp: device handle |
2011 | * @o: vlan object info |
2012 | * @vlan_mac_flags: vlan flags |
2013 | * @ramrod_flags: execution flags to be used for this deletion |
2014 | * |
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
2019 | */ |
2020 | static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, |
2021 | struct bnx2x_vlan_mac_obj *o, |
2022 | unsigned long *vlan_mac_flags, |
2023 | unsigned long *ramrod_flags) |
2024 | { |
2025 | struct bnx2x_vlan_mac_registry_elem *pos = NULL; |
2026 | struct bnx2x_vlan_mac_ramrod_params p; |
2027 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
2028 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; |
2029 | unsigned long flags; |
2030 | int read_lock; |
2031 | int rc = 0; |
2032 | |
2033 | /* Clear pending commands first */ |
2034 | |
	spin_lock_bh(&exeq->lock);
2036 | |
2037 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { |
2038 | flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; |
2039 | if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == |
2040 | BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { |
2041 | rc = exeq->remove(bp, exeq->owner, exeq_pos); |
2042 | if (rc) { |
2043 | BNX2X_ERR("Failed to remove command\n" ); |
2044 | spin_unlock_bh(lock: &exeq->lock); |
2045 | return rc; |
2046 | } |
			list_del(&exeq_pos->link);
			bnx2x_exe_queue_free_elem(bp, exeq_pos);
2049 | } |
2050 | } |
2051 | |
	spin_unlock_bh(&exeq->lock);
2053 | |
2054 | /* Prepare a command request */ |
2055 | memset(&p, 0, sizeof(p)); |
2056 | p.vlan_mac_obj = o; |
2057 | p.ramrod_flags = *ramrod_flags; |
2058 | p.user_req.cmd = BNX2X_VLAN_MAC_DEL; |
2059 | |
	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
2063 | __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); |
2064 | __clear_bit(RAMROD_EXEC, &p.ramrod_flags); |
2065 | __clear_bit(RAMROD_CONT, &p.ramrod_flags); |
2066 | |
2067 | DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n" ); |
2068 | read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); |
2069 | if (read_lock != 0) |
2070 | return read_lock; |
2071 | |
2072 | list_for_each_entry(pos, &o->head, link) { |
2073 | flags = pos->vlan_mac_flags; |
2074 | if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == |
2075 | BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { |
2076 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; |
2077 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); |
			rc = bnx2x_config_vlan_mac(bp, &p);
2079 | if (rc < 0) { |
2080 | BNX2X_ERR("Failed to add a new DEL command\n" ); |
2081 | bnx2x_vlan_mac_h_read_unlock(bp, o); |
2082 | return rc; |
2083 | } |
2084 | } |
2085 | } |
2086 | |
2087 | DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n" ); |
2088 | bnx2x_vlan_mac_h_read_unlock(bp, o); |
2089 | |
2090 | p.ramrod_flags = *ramrod_flags; |
2091 | __set_bit(RAMROD_CONT, &p.ramrod_flags); |
2092 | |
	return bnx2x_config_vlan_mac(bp, &p);
2094 | } |
2095 | |
2096 | static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, |
2097 | u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, |
2098 | unsigned long *pstate, bnx2x_obj_type type) |
2099 | { |
2100 | raw->func_id = func_id; |
2101 | raw->cid = cid; |
2102 | raw->cl_id = cl_id; |
2103 | raw->rdata = rdata; |
2104 | raw->rdata_mapping = rdata_mapping; |
2105 | raw->state = state; |
2106 | raw->pstate = pstate; |
2107 | raw->obj_type = type; |
2108 | raw->check_pending = bnx2x_raw_check_pending; |
2109 | raw->clear_pending = bnx2x_raw_clear_pending; |
2110 | raw->set_pending = bnx2x_raw_set_pending; |
2111 | raw->wait_comp = bnx2x_raw_wait; |
2112 | } |
2113 | |
2114 | static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, |
2115 | u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, |
2116 | int state, unsigned long *pstate, bnx2x_obj_type type, |
2117 | struct bnx2x_credit_pool_obj *macs_pool, |
2118 | struct bnx2x_credit_pool_obj *vlans_pool) |
2119 | { |
	INIT_LIST_HEAD(&o->head);
2121 | o->head_reader = 0; |
2122 | o->head_exe_request = false; |
2123 | o->saved_ramrod_flags = 0; |
2124 | |
2125 | o->macs_pool = macs_pool; |
2126 | o->vlans_pool = vlans_pool; |
2127 | |
2128 | o->delete_all = bnx2x_vlan_mac_del_all; |
2129 | o->restore = bnx2x_vlan_mac_restore; |
2130 | o->complete = bnx2x_complete_vlan_mac; |
2131 | o->wait = bnx2x_wait_vlan_mac; |
2132 | |
	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2134 | state, pstate, type); |
2135 | } |
2136 | |
2137 | void bnx2x_init_mac_obj(struct bnx2x *bp, |
2138 | struct bnx2x_vlan_mac_obj *mac_obj, |
2139 | u8 cl_id, u32 cid, u8 func_id, void *rdata, |
2140 | dma_addr_t rdata_mapping, int state, |
2141 | unsigned long *pstate, bnx2x_obj_type type, |
2142 | struct bnx2x_credit_pool_obj *macs_pool) |
2143 | { |
2144 | union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; |
2145 | |
	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2147 | rdata_mapping, state, pstate, type, |
2148 | macs_pool, NULL); |
2149 | |
2150 | /* CAM credit pool handling */ |
2151 | mac_obj->get_credit = bnx2x_get_credit_mac; |
2152 | mac_obj->put_credit = bnx2x_put_credit_mac; |
2153 | mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; |
2154 | mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; |
2155 | |
2156 | if (CHIP_IS_E1x(bp)) { |
2157 | mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; |
2158 | mac_obj->check_del = bnx2x_check_mac_del; |
2159 | mac_obj->check_add = bnx2x_check_mac_add; |
2160 | mac_obj->check_move = bnx2x_check_move_always_err; |
2161 | mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; |
2162 | |
2163 | /* Exe Queue */ |
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
2171 | } else { |
2172 | mac_obj->set_one_rule = bnx2x_set_one_mac_e2; |
2173 | mac_obj->check_del = bnx2x_check_mac_del; |
2174 | mac_obj->check_add = bnx2x_check_mac_add; |
2175 | mac_obj->check_move = bnx2x_check_move; |
2176 | mac_obj->ramrod_cmd = |
2177 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; |
2178 | mac_obj->get_n_elements = bnx2x_get_n_elements; |
2179 | |
2180 | /* Exe Queue */ |
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
2188 | } |
2189 | } |
2190 | |
2191 | void bnx2x_init_vlan_obj(struct bnx2x *bp, |
2192 | struct bnx2x_vlan_mac_obj *vlan_obj, |
2193 | u8 cl_id, u32 cid, u8 func_id, void *rdata, |
2194 | dma_addr_t rdata_mapping, int state, |
2195 | unsigned long *pstate, bnx2x_obj_type type, |
2196 | struct bnx2x_credit_pool_obj *vlans_pool) |
2197 | { |
2198 | union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; |
2199 | |
	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2201 | rdata_mapping, state, pstate, type, NULL, |
2202 | vlans_pool); |
2203 | |
2204 | vlan_obj->get_credit = bnx2x_get_credit_vlan; |
2205 | vlan_obj->put_credit = bnx2x_put_credit_vlan; |
2206 | vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; |
2207 | vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; |
2208 | |
2209 | if (CHIP_IS_E1x(bp)) { |
2210 | BNX2X_ERR("Do not support chips others than E2 and newer\n" ); |
2211 | BUG(); |
2212 | } else { |
2213 | vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; |
2214 | vlan_obj->check_del = bnx2x_check_vlan_del; |
2215 | vlan_obj->check_add = bnx2x_check_vlan_add; |
2216 | vlan_obj->check_move = bnx2x_check_move; |
2217 | vlan_obj->ramrod_cmd = |
2218 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; |
2219 | vlan_obj->get_n_elements = bnx2x_get_n_elements; |
2220 | |
2221 | /* Exe Queue */ |
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
2229 | } |
2230 | } |
2231 | |
2232 | void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, |
2233 | struct bnx2x_vlan_mac_obj *vlan_mac_obj, |
2234 | u8 cl_id, u32 cid, u8 func_id, void *rdata, |
2235 | dma_addr_t rdata_mapping, int state, |
2236 | unsigned long *pstate, bnx2x_obj_type type, |
2237 | struct bnx2x_credit_pool_obj *macs_pool, |
2238 | struct bnx2x_credit_pool_obj *vlans_pool) |
2239 | { |
2240 | union bnx2x_qable_obj *qable_obj = |
2241 | (union bnx2x_qable_obj *)vlan_mac_obj; |
2242 | |
	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2244 | rdata_mapping, state, pstate, type, |
2245 | macs_pool, vlans_pool); |
2246 | |
2247 | /* CAM pool handling */ |
2248 | vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; |
2249 | vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; |
2250 | /* CAM offset is relevant for 57710 and 57711 chips only which have a |
2251 | * single CAM for both MACs and VLAN-MAC pairs. So the offset |
2252 | * will be taken from MACs' pool object only. |
2253 | */ |
2254 | vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; |
2255 | vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; |
2256 | |
2257 | if (CHIP_IS_E1(bp)) { |
2258 | BNX2X_ERR("Do not support chips others than E2\n" ); |
2259 | BUG(); |
2260 | } else if (CHIP_IS_E1H(bp)) { |
2261 | vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; |
2262 | vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; |
2263 | vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; |
2264 | vlan_mac_obj->check_move = bnx2x_check_move_always_err; |
2265 | vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; |
2266 | |
2267 | /* Exe Queue */ |
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
2275 | } else { |
2276 | vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; |
2277 | vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; |
2278 | vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; |
2279 | vlan_mac_obj->check_move = bnx2x_check_move; |
2280 | vlan_mac_obj->ramrod_cmd = |
2281 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; |
2282 | |
2283 | /* Exe Queue */ |
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
2292 | } |
2293 | } |
2294 | /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ |
2295 | static inline void __storm_memset_mac_filters(struct bnx2x *bp, |
2296 | struct tstorm_eth_mac_filter_config *mac_filters, |
2297 | u16 pf_id) |
2298 | { |
2299 | size_t size = sizeof(struct tstorm_eth_mac_filter_config); |
2300 | |
2301 | u32 addr = BAR_TSTRORM_INTMEM + |
2302 | TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); |
2303 | |
	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2305 | } |
2306 | |
2307 | static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, |
2308 | struct bnx2x_rx_mode_ramrod_params *p) |
2309 | { |
2310 | /* update the bp MAC filter structure */ |
2311 | u32 mask = (1 << p->cl_id); |
2312 | |
2313 | struct tstorm_eth_mac_filter_config *mac_filters = |
2314 | (struct tstorm_eth_mac_filter_config *)p->rdata; |
2315 | |
2316 | /* initial setting is drop-all */ |
2317 | u8 drop_all_ucast = 1, drop_all_mcast = 1; |
2318 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; |
2319 | u8 unmatched_unicast = 0; |
2320 | |
	/* In e1x we only take the rx accept flags into account since tx
	 * switching isn't enabled. */
2323 | if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) |
2324 | /* accept matched ucast */ |
2325 | drop_all_ucast = 0; |
2326 | |
2327 | if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) |
2328 | /* accept matched mcast */ |
2329 | drop_all_mcast = 0; |
2330 | |
2331 | if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { |
		/* accept all ucast */
2333 | drop_all_ucast = 0; |
2334 | accp_all_ucast = 1; |
2335 | } |
2336 | if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { |
2337 | /* accept all mcast */ |
2338 | drop_all_mcast = 0; |
2339 | accp_all_mcast = 1; |
2340 | } |
2341 | if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) |
2342 | /* accept (all) bcast */ |
2343 | accp_all_bcast = 1; |
2344 | if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) |
2345 | /* accept unmatched unicasts */ |
2346 | unmatched_unicast = 1; |
2347 | |
2348 | mac_filters->ucast_drop_all = drop_all_ucast ? |
2349 | mac_filters->ucast_drop_all | mask : |
2350 | mac_filters->ucast_drop_all & ~mask; |
2351 | |
2352 | mac_filters->mcast_drop_all = drop_all_mcast ? |
2353 | mac_filters->mcast_drop_all | mask : |
2354 | mac_filters->mcast_drop_all & ~mask; |
2355 | |
2356 | mac_filters->ucast_accept_all = accp_all_ucast ? |
2357 | mac_filters->ucast_accept_all | mask : |
2358 | mac_filters->ucast_accept_all & ~mask; |
2359 | |
2360 | mac_filters->mcast_accept_all = accp_all_mcast ? |
2361 | mac_filters->mcast_accept_all | mask : |
2362 | mac_filters->mcast_accept_all & ~mask; |
2363 | |
2364 | mac_filters->bcast_accept_all = accp_all_bcast ? |
2365 | mac_filters->bcast_accept_all | mask : |
2366 | mac_filters->bcast_accept_all & ~mask; |
2367 | |
2368 | mac_filters->unmatched_unicast = unmatched_unicast ? |
2369 | mac_filters->unmatched_unicast | mask : |
2370 | mac_filters->unmatched_unicast & ~mask; |
2371 | |
2372 | DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" |
2373 | "accp_mcast 0x%x\naccp_bcast 0x%x\n" , |
2374 | mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, |
2375 | mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, |
2376 | mac_filters->bcast_accept_all); |
2377 | |
2378 | /* write the MAC filter structure*/ |
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2380 | |
2381 | /* The operation is completed */ |
	clear_bit(p->state, p->pstate);
2383 | smp_mb__after_atomic(); |
2384 | |
2385 | return 0; |
2386 | } |
2387 | |
2388 | /* Setup ramrod data */ |
2389 | static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, |
2390 | struct eth_classify_header *hdr, |
2391 | u8 rule_cnt) |
2392 | { |
2393 | hdr->echo = cpu_to_le32(cid); |
2394 | hdr->rule_cnt = rule_cnt; |
2395 | } |
2396 | |
2397 | static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, |
2398 | unsigned long *accept_flags, |
2399 | struct eth_filter_rules_cmd *cmd, |
2400 | bool clear_accept_all) |
2401 | { |
2402 | u16 state; |
2403 | |
2404 | /* start with 'drop-all' */ |
2405 | state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | |
2406 | ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; |
2407 | |
2408 | if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags)) |
2409 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; |
2410 | |
2411 | if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags)) |
2412 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; |
2413 | |
2414 | if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) { |
2415 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; |
2416 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; |
2417 | } |
2418 | |
2419 | if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) { |
2420 | state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; |
2421 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; |
2422 | } |
2423 | |
2424 | if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags)) |
2425 | state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; |
2426 | |
2427 | if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) { |
2428 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; |
2429 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; |
2430 | } |
2431 | |
2432 | if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags)) |
2433 | state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; |
2434 | |
2435 | /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ |
2436 | if (clear_accept_all) { |
2437 | state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; |
2438 | state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; |
2439 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; |
2440 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; |
2441 | } |
2442 | |
2443 | cmd->state = cpu_to_le16(state); |
2444 | } |
2445 | |
2446 | static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, |
2447 | struct bnx2x_rx_mode_ramrod_params *p) |
2448 | { |
2449 | struct eth_filter_rules_ramrod_data *data = p->rdata; |
2450 | int rc; |
2451 | u8 rule_idx = 0; |
2452 | |
2453 | /* Reset the ramrod data buffer */ |
2454 | memset(data, 0, sizeof(*data)); |
2455 | |
2456 | /* Setup ramrod data */ |
2457 | |
2458 | /* Tx (internal switching) */ |
2459 | if (test_bit(RAMROD_TX, &p->ramrod_flags)) { |
2460 | data->rules[rule_idx].client_id = p->cl_id; |
2461 | data->rules[rule_idx].func_id = p->func_id; |
2462 | |
2463 | data->rules[rule_idx].cmd_general_data = |
2464 | ETH_FILTER_RULES_CMD_TX_CMD; |
2465 | |
		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
2469 | } |
2470 | |
2471 | /* Rx */ |
2472 | if (test_bit(RAMROD_RX, &p->ramrod_flags)) { |
2473 | data->rules[rule_idx].client_id = p->cl_id; |
2474 | data->rules[rule_idx].func_id = p->func_id; |
2475 | |
2476 | data->rules[rule_idx].cmd_general_data = |
2477 | ETH_FILTER_RULES_CMD_RX_CMD; |
2478 | |
		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
2482 | } |
2483 | |
2484 | /* If FCoE Queue configuration has been requested configure the Rx and |
2485 | * internal switching modes for this queue in separate rules. |
2486 | * |
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2488 | * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. |
2489 | */ |
2490 | if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { |
2491 | /* Tx (internal switching) */ |
2492 | if (test_bit(RAMROD_TX, &p->ramrod_flags)) { |
2493 | data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); |
2494 | data->rules[rule_idx].func_id = p->func_id; |
2495 | |
2496 | data->rules[rule_idx].cmd_general_data = |
2497 | ETH_FILTER_RULES_CMD_TX_CMD; |
2498 | |
			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
2502 | rule_idx++; |
2503 | } |
2504 | |
2505 | /* Rx */ |
2506 | if (test_bit(RAMROD_RX, &p->ramrod_flags)) { |
2507 | data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); |
2508 | data->rules[rule_idx].func_id = p->func_id; |
2509 | |
2510 | data->rules[rule_idx].cmd_general_data = |
2511 | ETH_FILTER_RULES_CMD_RX_CMD; |
2512 | |
			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
2516 | rule_idx++; |
2517 | } |
2518 | } |
2519 | |
2520 | /* Set the ramrod header (most importantly - number of rules to |
2521 | * configure). |
2522 | */ |
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2524 | |
2525 | DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n" , |
2526 | data->header.rule_cnt, p->rx_accept_flags, |
2527 | p->tx_accept_flags); |
2528 | |
2529 | /* No need for an explicit memory barrier here as long as we |
2530 | * ensure the ordering of writing to the SPQ element |
2531 | * and updating of the SPQ producer which involves a memory |
2532 | * read. If the memory read is removed we will have to put a |
2533 | * full memory barrier there (inside bnx2x_sp_post()). |
2534 | */ |
2535 | |
2536 | /* Send a ramrod */ |
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
2541 | if (rc) |
2542 | return rc; |
2543 | |
2544 | /* Ramrod completion is pending */ |
2545 | return 1; |
2546 | } |
2547 | |
2548 | static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, |
2549 | struct bnx2x_rx_mode_ramrod_params *p) |
2550 | { |
	return bnx2x_state_wait(bp, p->state, p->pstate);
2552 | } |
2553 | |
2554 | static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, |
2555 | struct bnx2x_rx_mode_ramrod_params *p) |
2556 | { |
2557 | /* Do nothing */ |
2558 | return 0; |
2559 | } |
2560 | |
2561 | int bnx2x_config_rx_mode(struct bnx2x *bp, |
2562 | struct bnx2x_rx_mode_ramrod_params *p) |
2563 | { |
2564 | int rc; |
2565 | |
2566 | /* Configure the new classification in the chip */ |
2567 | rc = p->rx_mode_obj->config_rx_mode(bp, p); |
2568 | if (rc < 0) |
2569 | return rc; |
2570 | |
2571 | /* Wait for a ramrod completion if was requested */ |
2572 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { |
2573 | rc = p->rx_mode_obj->wait_comp(bp, p); |
2574 | if (rc) |
2575 | return rc; |
2576 | } |
2577 | |
2578 | return rc; |
2579 | } |
2580 | |
2581 | void bnx2x_init_rx_mode_obj(struct bnx2x *bp, |
2582 | struct bnx2x_rx_mode_obj *o) |
2583 | { |
2584 | if (CHIP_IS_E1x(bp)) { |
2585 | o->wait_comp = bnx2x_empty_rx_mode_wait; |
2586 | o->config_rx_mode = bnx2x_set_rx_mode_e1x; |
2587 | } else { |
2588 | o->wait_comp = bnx2x_wait_rx_mode_comp_e2; |
2589 | o->config_rx_mode = bnx2x_set_rx_mode_e2; |
2590 | } |
2591 | } |
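
/* Illustrative sketch of the rx-mode flow built on the object initialized
 * above (only names used in this file are shown; the surrounding setup is
 * assumed to have been done by the caller):
 *
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
 *	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);
 *	__set_bit(RAMROD_RX, &p->ramrod_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);
 *	rc = bnx2x_config_rx_mode(bp, p);
 *
 * where 'p' is a caller-owned bnx2x_rx_mode_ramrod_params whose cl_id, cid,
 * func_id, rdata/rdata_mapping, state/pstate and rx_mode_obj fields were set
 * up beforehand.
 */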
2592 | |
2593 | /********************* Multicast verbs: SET, CLEAR ****************************/ |
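
/* Multicast filtering on the newer chips is approximate: each MAC is hashed
 * with crc32c and bits 31-24 of the CRC select one of 256 "bins" (see
 * bnx2x_mcast_bin_from_mac() below); the registry then only tracks which bins
 * are in use rather than the individual addresses.
 */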
2594 | static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) |
2595 | { |
	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2597 | } |
2598 | |
2599 | struct bnx2x_mcast_mac_elem { |
2600 | struct list_head link; |
2601 | u8 mac[ETH_ALEN]; |
2602 | u8 pad[2]; /* For a natural alignment of the following buffer */ |
2603 | }; |
2604 | |
2605 | struct bnx2x_mcast_bin_elem { |
2606 | struct list_head link; |
2607 | int bin; |
2608 | int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */ |
2609 | }; |
2610 | |
2611 | union bnx2x_mcast_elem { |
2612 | struct bnx2x_mcast_bin_elem bin_elem; |
2613 | struct bnx2x_mcast_mac_elem mac_elem; |
2614 | }; |
2615 | |
2616 | struct bnx2x_mcast_elem_group { |
2617 | struct list_head mcast_group_link; |
2618 | union bnx2x_mcast_elem mcast_elems[]; |
2619 | }; |
2620 | |
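/* Each bnx2x_mcast_elem_group occupies a single page: the group list_head
 * followed by as many union bnx2x_mcast_elem entries as fit in the remainder
 * of the page. MCAST_MAC_ELEMS_PER_PG below is that per-page capacity (as a
 * rough example, (4096 - 16) / 24 = 170 entries with 4 KiB pages and 64-bit
 * pointers; the exact figure is arch-dependent).
 */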
2621 | #define MCAST_MAC_ELEMS_PER_PG \ |
2622 | ((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \ |
2623 | sizeof(union bnx2x_mcast_elem)) |
2624 | |
2625 | struct bnx2x_pending_mcast_cmd { |
2626 | struct list_head link; |
2627 | struct list_head group_head; |
2628 | int type; /* BNX2X_MCAST_CMD_X */ |
2629 | union { |
2630 | struct list_head macs_head; |
2631 | u32 macs_num; /* Needed for DEL command */ |
2632 | int next_bin; /* Needed for RESTORE flow with aprox match */ |
2633 | } data; |
2634 | |
2635 | bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set |
2636 | * when macs_head had been converted to a list of |
2637 | * bnx2x_mcast_bin_elem. |
2638 | */ |
2639 | |
2640 | bool done; /* set to true, when the command has been handled, |
2641 | * practically used in 57712 handling only, where one pending |
2642 | * command may be handled in a few operations. As long as for |
2643 | * other chips every operation handling is completed in a |
2644 | * single ramrod, there is no need to utilize this field. |
2645 | */ |
2646 | }; |
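
/* Pending multicast commands are queued FIFO on the object's
 * pending_cmds_head by bnx2x_mcast_enqueue_cmd() and later drained by the
 * chip-specific pending-command handler (e.g.
 * bnx2x_mcast_handle_pending_cmds_e2()), which writes at most o->max_cmd_len
 * rules per ramrod and leaves a partially handled command on the list with
 * done == false.
 */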
2647 | |
2648 | static int bnx2x_mcast_wait(struct bnx2x *bp, |
2649 | struct bnx2x_mcast_obj *o) |
2650 | { |
	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2652 | o->raw.wait_comp(bp, &o->raw)) |
2653 | return -EBUSY; |
2654 | |
2655 | return 0; |
2656 | } |
2657 | |
2658 | static void bnx2x_free_groups(struct list_head *mcast_group_list) |
2659 | { |
2660 | struct bnx2x_mcast_elem_group *current_mcast_group; |
2661 | |
	while (!list_empty(mcast_group_list)) {
2663 | current_mcast_group = list_first_entry(mcast_group_list, |
2664 | struct bnx2x_mcast_elem_group, |
2665 | mcast_group_link); |
		list_del(&current_mcast_group->mcast_group_link);
2667 | free_page((unsigned long)current_mcast_group); |
2668 | } |
2669 | } |
2670 | |
2671 | static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, |
2672 | struct bnx2x_mcast_obj *o, |
2673 | struct bnx2x_mcast_ramrod_params *p, |
2674 | enum bnx2x_mcast_cmd cmd) |
2675 | { |
2676 | struct bnx2x_pending_mcast_cmd *new_cmd; |
2677 | struct bnx2x_mcast_list_elem *pos; |
2678 | struct bnx2x_mcast_elem_group *elem_group; |
2679 | struct bnx2x_mcast_mac_elem *mac_elem; |
2680 | int total_elems = 0, macs_list_len = 0, offset = 0; |
2681 | |
2682 | /* When adding MACs we'll need to store their values */ |
2683 | if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET) |
2684 | macs_list_len = p->mcast_list_len; |
2685 | |
2686 | /* If the command is empty ("handle pending commands only"), break */ |
2687 | if (!p->mcast_list_len) |
2688 | return 0; |
2689 | |
2690 | /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ |
	new_cmd = kzalloc(sizeof(*new_cmd), GFP_ATOMIC);
2692 | if (!new_cmd) |
2693 | return -ENOMEM; |
2694 | |
	INIT_LIST_HEAD(&new_cmd->data.macs_head);
	INIT_LIST_HEAD(&new_cmd->group_head);
2697 | new_cmd->type = cmd; |
2698 | new_cmd->done = false; |
2699 | |
2700 | DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n" , |
2701 | cmd, macs_list_len); |
2702 | |
2703 | switch (cmd) { |
2704 | case BNX2X_MCAST_CMD_ADD: |
2705 | case BNX2X_MCAST_CMD_SET: |
2706 | /* For a set command, we need to allocate sufficient memory for |
2707 | * all the bins, since we can't analyze at this point how much |
2708 | * memory would be required. |
2709 | */ |
2710 | total_elems = macs_list_len; |
2711 | if (cmd == BNX2X_MCAST_CMD_SET) { |
2712 | if (total_elems < BNX2X_MCAST_BINS_NUM) |
2713 | total_elems = BNX2X_MCAST_BINS_NUM; |
2714 | } |
2715 | while (total_elems > 0) { |
2716 | elem_group = (struct bnx2x_mcast_elem_group *) |
2717 | __get_free_page(GFP_ATOMIC | __GFP_ZERO); |
2718 | if (!elem_group) { |
				bnx2x_free_groups(&new_cmd->group_head);
				kfree(new_cmd);
2721 | return -ENOMEM; |
2722 | } |
2723 | total_elems -= MCAST_MAC_ELEMS_PER_PG; |
			list_add_tail(&elem_group->mcast_group_link,
				      &new_cmd->group_head);
2726 | } |
2727 | elem_group = list_first_entry(&new_cmd->group_head, |
2728 | struct bnx2x_mcast_elem_group, |
2729 | mcast_group_link); |
2730 | list_for_each_entry(pos, &p->mcast_list, link) { |
2731 | mac_elem = &elem_group->mcast_elems[offset].mac_elem; |
2732 | memcpy(mac_elem->mac, pos->mac, ETH_ALEN); |
2733 | /* Push the MACs of the current command into the pending |
2734 | * command MACs list: FIFO |
2735 | */ |
			list_add_tail(&mac_elem->link,
				      &new_cmd->data.macs_head);
2738 | offset++; |
2739 | if (offset == MCAST_MAC_ELEMS_PER_PG) { |
2740 | offset = 0; |
2741 | elem_group = list_next_entry(elem_group, |
2742 | mcast_group_link); |
2743 | } |
2744 | } |
2745 | break; |
2746 | |
2747 | case BNX2X_MCAST_CMD_DEL: |
2748 | new_cmd->data.macs_num = p->mcast_list_len; |
2749 | break; |
2750 | |
2751 | case BNX2X_MCAST_CMD_RESTORE: |
2752 | new_cmd->data.next_bin = 0; |
2753 | break; |
2754 | |
2755 | default: |
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
2758 | return -EINVAL; |
2759 | } |
2760 | |
2761 | /* Push the new pending command to the tail of the pending list: FIFO */ |
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2763 | |
2764 | o->set_sched(o); |
2765 | |
2766 | return 1; |
2767 | } |
2768 | |
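/* The approximate-match registry (o->registry.aprox_match.vec) is a bit
 * vector with one bit per bin, packed into u64 words: bin i lives in word
 * i / BIT_VEC64_ELEM_SZ at bit i % BIT_VEC64_ELEM_SZ. The helpers below walk
 * this vector.
 */
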
2769 | /** |
2770 | * bnx2x_mcast_get_next_bin - get the next set bin (index) |
2771 | * |
2772 | * @o: multicast object info |
2773 | * @last: index to start looking from (including) |
2774 | * |
2775 | * returns the next found (set) bin or a negative value if none is found. |
2776 | */ |
2777 | static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) |
2778 | { |
2779 | int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; |
2780 | |
2781 | for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { |
2782 | if (o->registry.aprox_match.vec[i]) |
2783 | for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { |
2784 | int cur_bit = j + BIT_VEC64_ELEM_SZ * i; |
2785 | if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. |
2786 | vec, cur_bit)) { |
2787 | return cur_bit; |
2788 | } |
2789 | } |
2790 | inner_start = 0; |
2791 | } |
2792 | |
2793 | /* None found */ |
2794 | return -1; |
2795 | } |
2796 | |
2797 | /** |
2798 | * bnx2x_mcast_clear_first_bin - find the first set bin and clear it |
2799 | * |
 * @o: multicast object info
2801 | * |
2802 | * returns the index of the found bin or -1 if none is found |
2803 | */ |
2804 | static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) |
2805 | { |
	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2807 | |
2808 | if (cur_bit >= 0) |
2809 | BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); |
2810 | |
2811 | return cur_bit; |
2812 | } |
2813 | |
2814 | static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) |
2815 | { |
2816 | struct bnx2x_raw_obj *raw = &o->raw; |
2817 | u8 rx_tx_flag = 0; |
2818 | |
2819 | if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || |
2820 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) |
2821 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; |
2822 | |
2823 | if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || |
2824 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) |
2825 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; |
2826 | |
2827 | return rx_tx_flag; |
2828 | } |
2829 | |
2830 | static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, |
2831 | struct bnx2x_mcast_obj *o, int idx, |
2832 | union bnx2x_mcast_config_data *cfg_data, |
2833 | enum bnx2x_mcast_cmd cmd) |
2834 | { |
2835 | struct bnx2x_raw_obj *r = &o->raw; |
2836 | struct eth_multicast_rules_ramrod_data *data = |
2837 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); |
2838 | u8 func_id = r->func_id; |
2839 | u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); |
2840 | int bin; |
2841 | |
2842 | if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) || |
2843 | (cmd == BNX2X_MCAST_CMD_SET_ADD)) |
2844 | rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; |
2845 | |
2846 | data->rules[idx].cmd_general_data |= rx_tx_add_flag; |
2847 | |
2848 | /* Get a bin and update a bins' vector */ |
2849 | switch (cmd) { |
2850 | case BNX2X_MCAST_CMD_ADD: |
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2852 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); |
2853 | break; |
2854 | |
2855 | case BNX2X_MCAST_CMD_DEL: |
2856 | /* If there were no more bins to clear |
2857 | * (bnx2x_mcast_clear_first_bin() returns -1) then we would |
2858 | * clear any (0xff) bin. |
2859 | * See bnx2x_mcast_validate_e2() for explanation when it may |
2860 | * happen. |
2861 | */ |
2862 | bin = bnx2x_mcast_clear_first_bin(o); |
2863 | break; |
2864 | |
2865 | case BNX2X_MCAST_CMD_RESTORE: |
2866 | bin = cfg_data->bin; |
2867 | break; |
2868 | |
2869 | case BNX2X_MCAST_CMD_SET_ADD: |
2870 | bin = cfg_data->bin; |
2871 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); |
2872 | break; |
2873 | |
2874 | case BNX2X_MCAST_CMD_SET_DEL: |
2875 | bin = cfg_data->bin; |
2876 | BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin); |
2877 | break; |
2878 | |
2879 | default: |
2880 | BNX2X_ERR("Unknown command: %d\n" , cmd); |
2881 | return; |
2882 | } |
2883 | |
2884 | DP(BNX2X_MSG_SP, "%s bin %d\n" , |
2885 | ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? |
2886 | "Setting" : "Clearing" ), bin); |
2887 | |
2888 | data->rules[idx].bin_id = (u8)bin; |
2889 | data->rules[idx].func_id = func_id; |
2890 | data->rules[idx].engine_id = o->engine_id; |
2891 | } |
2892 | |
2893 | /** |
2894 | * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry |
2895 | * |
2896 | * @bp: device handle |
2897 | * @o: multicast object info |
2898 | * @start_bin: index in the registry to start from (including) |
2899 | * @rdata_idx: index in the ramrod data to start from |
2900 | * |
2901 | * returns last handled bin index or -1 if all bins have been handled |
2902 | */ |
2903 | static inline int bnx2x_mcast_handle_restore_cmd_e2( |
2904 | struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, |
2905 | int *rdata_idx) |
2906 | { |
2907 | int cur_bin, cnt = *rdata_idx; |
2908 | union bnx2x_mcast_config_data cfg_data = {NULL}; |
2909 | |
2910 | /* go through the registry and configure the bins from it */ |
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	     cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2913 | |
2914 | cfg_data.bin = (u8)cur_bin; |
2915 | o->set_one_rule(bp, o, cnt, &cfg_data, |
2916 | BNX2X_MCAST_CMD_RESTORE); |
2917 | |
2918 | cnt++; |
2919 | |
2920 | DP(BNX2X_MSG_SP, "About to configure a bin %d\n" , cur_bin); |
2921 | |
2922 | /* Break if we reached the maximum number |
2923 | * of rules. |
2924 | */ |
2925 | if (cnt >= o->max_cmd_len) |
2926 | break; |
2927 | } |
2928 | |
2929 | *rdata_idx = cnt; |
2930 | |
2931 | return cur_bin; |
2932 | } |
2933 | |
2934 | static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, |
2935 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, |
2936 | int *line_idx) |
2937 | { |
2938 | struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; |
2939 | int cnt = *line_idx; |
2940 | union bnx2x_mcast_config_data cfg_data = {NULL}; |
2941 | |
2942 | list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, |
2943 | link) { |
2944 | |
2945 | cfg_data.mac = &pmac_pos->mac[0]; |
2946 | o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); |
2947 | |
2948 | cnt++; |
2949 | |
2950 | DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n" , |
2951 | pmac_pos->mac); |
2952 | |
		list_del(&pmac_pos->link);
2954 | |
2955 | /* Break if we reached the maximum number |
2956 | * of rules. |
2957 | */ |
2958 | if (cnt >= o->max_cmd_len) |
2959 | break; |
2960 | } |
2961 | |
2962 | *line_idx = cnt; |
2963 | |
2964 | /* if no more MACs to configure - we are done */ |
	if (list_empty(&cmd_pos->data.macs_head))
2966 | cmd_pos->done = true; |
2967 | } |
2968 | |
2969 | static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, |
2970 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, |
2971 | int *line_idx) |
2972 | { |
2973 | int cnt = *line_idx; |
2974 | |
2975 | while (cmd_pos->data.macs_num) { |
2976 | o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); |
2977 | |
2978 | cnt++; |
2979 | |
2980 | cmd_pos->data.macs_num--; |
2981 | |
2982 | DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n" , |
2983 | cmd_pos->data.macs_num, cnt); |
2984 | |
2985 | /* Break if we reached the maximum |
2986 | * number of rules. |
2987 | */ |
2988 | if (cnt >= o->max_cmd_len) |
2989 | break; |
2990 | } |
2991 | |
2992 | *line_idx = cnt; |
2993 | |
2994 | /* If we cleared all bins - we are done */ |
2995 | if (!cmd_pos->data.macs_num) |
2996 | cmd_pos->done = true; |
2997 | } |
2998 | |
2999 | static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, |
3000 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, |
3001 | int *line_idx) |
3002 | { |
3003 | cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, |
3004 | line_idx); |
3005 | |
3006 | if (cmd_pos->data.next_bin < 0) |
3007 | /* If o->set_restore returned -1 we are done */ |
3008 | cmd_pos->done = true; |
3009 | else |
3010 | /* Start from the next bin next time */ |
3011 | cmd_pos->data.next_bin++; |
3012 | } |
3013 | |
3014 | static void |
3015 | bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp, |
3016 | struct bnx2x_mcast_obj *o, |
3017 | struct bnx2x_pending_mcast_cmd *cmd_pos) |
3018 | { |
3019 | u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ]; |
3020 | struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; |
3021 | struct bnx2x_mcast_bin_elem *p_item; |
3022 | struct bnx2x_mcast_elem_group *elem_group; |
3023 | int cnt = 0, mac_cnt = 0, offset = 0, i; |
3024 | |
3025 | memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ); |
3026 | memcpy(cur, o->registry.aprox_match.vec, |
3027 | sizeof(u64) * BNX2X_MCAST_VEC_SZ); |
3028 | |
3029 | /* Fill `current' with the required set of bins to configure */ |
3030 | list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, |
3031 | link) { |
		int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac);
3033 | |
3034 | DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n" , |
3035 | pmac_pos->mac); |
3036 | |
3037 | BIT_VEC64_SET_BIT(req, bin); |
3038 | list_del(entry: &pmac_pos->link); |
3039 | mac_cnt++; |
3040 | } |
3041 | |
3042 | /* We no longer have use for the MACs; Need to re-use memory for |
3043 | * a list that will be used to configure bins. |
3044 | */ |
3045 | cmd_pos->set_convert = true; |
	INIT_LIST_HEAD(&cmd_pos->data.macs_head);
3047 | elem_group = list_first_entry(&cmd_pos->group_head, |
3048 | struct bnx2x_mcast_elem_group, |
3049 | mcast_group_link); |
3050 | for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) { |
3051 | bool b_current = !!BIT_VEC64_TEST_BIT(cur, i); |
3052 | bool b_required = !!BIT_VEC64_TEST_BIT(req, i); |
3053 | |
3054 | if (b_current == b_required) |
3055 | continue; |
3056 | |
3057 | p_item = &elem_group->mcast_elems[offset].bin_elem; |
3058 | p_item->bin = i; |
3059 | p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD |
3060 | : BNX2X_MCAST_CMD_SET_DEL; |
		list_add_tail(&p_item->link, &cmd_pos->data.macs_head);
3062 | cnt++; |
3063 | offset++; |
3064 | if (offset == MCAST_MAC_ELEMS_PER_PG) { |
3065 | offset = 0; |
3066 | elem_group = list_next_entry(elem_group, |
3067 | mcast_group_link); |
3068 | } |
3069 | } |
3070 | |
3071 | /* We now definitely know how many commands are hiding here. |
3072 | * Also need to correct the disruption we've added to guarantee this |
3073 | * would be enqueued. |
3074 | */ |
3075 | o->total_pending_num -= (o->max_cmd_len + mac_cnt); |
3076 | o->total_pending_num += cnt; |
3077 | |
3078 | DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n" , o->total_pending_num); |
3079 | } |
3080 | |
3081 | static void |
3082 | bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp, |
3083 | struct bnx2x_mcast_obj *o, |
3084 | struct bnx2x_pending_mcast_cmd *cmd_pos, |
3085 | int *cnt) |
3086 | { |
3087 | union bnx2x_mcast_config_data cfg_data = {NULL}; |
3088 | struct bnx2x_mcast_bin_elem *p_item, *p_item_n; |
3089 | |
3090 | /* This is actually a 2-part scheme - it starts by converting the MACs |
3091 | * into a list of bins to be added/removed, and correcting the numbers |
	 * on the object. This is allowed now, as we're sure that all
	 * previously configured requests have already been applied.
3094 | * The second part is actually adding rules for the newly introduced |
3095 | * entries [like all the rest of the hdl_pending functions]. |
3096 | */ |
3097 | if (!cmd_pos->set_convert) |
3098 | bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos); |
3099 | |
3100 | list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head, |
3101 | link) { |
3102 | cfg_data.bin = (u8)p_item->bin; |
3103 | o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type); |
3104 | (*cnt)++; |
3105 | |
		list_del(&p_item->link);
3107 | |
3108 | /* Break if we reached the maximum number of rules. */ |
3109 | if (*cnt >= o->max_cmd_len) |
3110 | break; |
3111 | } |
3112 | |
3113 | /* if no more MACs to configure - we are done */ |
	if (list_empty(&cmd_pos->data.macs_head))
3115 | cmd_pos->done = true; |
3116 | } |
3117 | |
3118 | static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, |
3119 | struct bnx2x_mcast_ramrod_params *p) |
3120 | { |
3121 | struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; |
3122 | int cnt = 0; |
3123 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3124 | |
3125 | list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, |
3126 | link) { |
3127 | switch (cmd_pos->type) { |
3128 | case BNX2X_MCAST_CMD_ADD: |
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
3130 | break; |
3131 | |
3132 | case BNX2X_MCAST_CMD_DEL: |
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
3134 | break; |
3135 | |
3136 | case BNX2X_MCAST_CMD_RESTORE: |
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
3139 | break; |
3140 | |
3141 | case BNX2X_MCAST_CMD_SET: |
			bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
3143 | break; |
3144 | |
3145 | default: |
3146 | BNX2X_ERR("Unknown command: %d\n" , cmd_pos->type); |
3147 | return -EINVAL; |
3148 | } |
3149 | |
3150 | /* If the command has been completed - remove it from the list |
3151 | * and free the memory |
3152 | */ |
3153 | if (cmd_pos->done) { |
			list_del(&cmd_pos->link);
			bnx2x_free_groups(&cmd_pos->group_head);
			kfree(cmd_pos);
3157 | } |
3158 | |
3159 | /* Break if we reached the maximum number of rules */ |
3160 | if (cnt >= o->max_cmd_len) |
3161 | break; |
3162 | } |
3163 | |
3164 | return cnt; |
3165 | } |
3166 | |
3167 | static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, |
3168 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, |
3169 | int *line_idx) |
3170 | { |
3171 | struct bnx2x_mcast_list_elem *mlist_pos; |
3172 | union bnx2x_mcast_config_data cfg_data = {NULL}; |
3173 | int cnt = *line_idx; |
3174 | |
3175 | list_for_each_entry(mlist_pos, &p->mcast_list, link) { |
3176 | cfg_data.mac = mlist_pos->mac; |
3177 | o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); |
3178 | |
3179 | cnt++; |
3180 | |
3181 | DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n" , |
3182 | mlist_pos->mac); |
3183 | } |
3184 | |
3185 | *line_idx = cnt; |
3186 | } |
3187 | |
3188 | static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, |
3189 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, |
3190 | int *line_idx) |
3191 | { |
3192 | int cnt = *line_idx, i; |
3193 | |
3194 | for (i = 0; i < p->mcast_list_len; i++) { |
3195 | o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); |
3196 | |
3197 | cnt++; |
3198 | |
3199 | DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n" , |
3200 | p->mcast_list_len - i - 1); |
3201 | } |
3202 | |
3203 | *line_idx = cnt; |
3204 | } |
3205 | |
3206 | /** |
3207 | * bnx2x_mcast_handle_current_cmd - send command if room |
3208 | * |
3209 | * @bp: device handle |
3210 | * @p: ramrod mcast info |
3211 | * @cmd: command |
3212 | * @start_cnt: first line in the ramrod data that may be used |
3213 | * |
3214 | * This function is called iff there is enough place for the current command in |
3215 | * the ramrod data. |
3216 | * Returns number of lines filled in the ramrod data in total. |
3217 | */ |
3218 | static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, |
3219 | struct bnx2x_mcast_ramrod_params *p, |
3220 | enum bnx2x_mcast_cmd cmd, |
3221 | int start_cnt) |
3222 | { |
3223 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3224 | int cnt = start_cnt; |
3225 | |
3226 | DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n" , p->mcast_list_len); |
3227 | |
3228 | switch (cmd) { |
3229 | case BNX2X_MCAST_CMD_ADD: |
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3231 | break; |
3232 | |
3233 | case BNX2X_MCAST_CMD_DEL: |
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3235 | break; |
3236 | |
3237 | case BNX2X_MCAST_CMD_RESTORE: |
3238 | o->hdl_restore(bp, o, 0, &cnt); |
3239 | break; |
3240 | |
3241 | default: |
3242 | BNX2X_ERR("Unknown command: %d\n" , cmd); |
3243 | return -EINVAL; |
3244 | } |
3245 | |
3246 | /* The current command has been handled */ |
3247 | p->mcast_list_len = 0; |
3248 | |
3249 | return cnt; |
3250 | } |
3251 | |
3252 | static int bnx2x_mcast_validate_e2(struct bnx2x *bp, |
3253 | struct bnx2x_mcast_ramrod_params *p, |
3254 | enum bnx2x_mcast_cmd cmd) |
3255 | { |
3256 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3257 | int reg_sz = o->get_registry_size(o); |
3258 | |
3259 | switch (cmd) { |
3260 | /* DEL command deletes all currently configured MACs */ |
3261 | case BNX2X_MCAST_CMD_DEL: |
3262 | o->set_registry_size(o, 0); |
3263 | fallthrough; |
3264 | |
3265 | /* RESTORE command will restore the entire multicast configuration */ |
3266 | case BNX2X_MCAST_CMD_RESTORE: |
		/* Here we set the approximate amount of work to do, which in
		 * fact may be less, as some MACs in postponed ADD
3269 | * command(s) scheduled before this command may fall into |
3270 | * the same bin and the actual number of bins set in the |
3271 | * registry would be less than we estimated here. See |
3272 | * bnx2x_mcast_set_one_rule_e2() for further details. |
3273 | */ |
3274 | p->mcast_list_len = reg_sz; |
3275 | break; |
3276 | |
3277 | case BNX2X_MCAST_CMD_ADD: |
3278 | case BNX2X_MCAST_CMD_CONT: |
3279 | /* Here we assume that all new MACs will fall into new bins. |
3280 | * However we will correct the real registry size after we |
3281 | * handle all pending commands. |
3282 | */ |
3283 | o->set_registry_size(o, reg_sz + p->mcast_list_len); |
3284 | break; |
3285 | |
3286 | case BNX2X_MCAST_CMD_SET: |
3287 | /* We can only learn how many commands would actually be used |
3288 | * when this is being configured. So for now, simply guarantee |
3289 | * the command will be enqueued [to refrain from adding logic |
3290 | * that handles this and THEN learns it needs several ramrods]. |
3291 | * Just like for ADD/Cont, the mcast_list_len might be an over |
3292 | * estimation; or even more so, since we don't take into |
3293 | * account the possibility of removal of existing bins. |
3294 | */ |
3295 | o->set_registry_size(o, reg_sz + p->mcast_list_len); |
3296 | o->total_pending_num += o->max_cmd_len; |
3297 | break; |
3298 | |
3299 | default: |
3300 | BNX2X_ERR("Unknown command: %d\n" , cmd); |
3301 | return -EINVAL; |
3302 | } |
3303 | |
3304 | /* Increase the total number of MACs pending to be configured */ |
3305 | o->total_pending_num += p->mcast_list_len; |
3306 | |
3307 | return 0; |
3308 | } |
3309 | |
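/* Undo the bookkeeping performed by bnx2x_mcast_validate_e2() when the
 * command could not be enqueued or executed.
 */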
3310 | static void bnx2x_mcast_revert_e2(struct bnx2x *bp, |
3311 | struct bnx2x_mcast_ramrod_params *p, |
3312 | int old_num_bins, |
3313 | enum bnx2x_mcast_cmd cmd) |
3314 | { |
3315 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3316 | |
3317 | o->set_registry_size(o, old_num_bins); |
3318 | o->total_pending_num -= p->mcast_list_len; |
3319 | |
3320 | if (cmd == BNX2X_MCAST_CMD_SET) |
3321 | o->total_pending_num -= o->max_cmd_len; |
3322 | } |
3323 | |
3324 | /** |
 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
3326 | * |
3327 | * @bp: device handle |
3328 | * @p: ramrod parameters |
3329 | * @len: number of rules to handle |
3330 | */ |
3331 | static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, |
3332 | struct bnx2x_mcast_ramrod_params *p, |
3333 | u8 len) |
3334 | { |
3335 | struct bnx2x_raw_obj *r = &p->mcast_obj->raw; |
3336 | struct eth_multicast_rules_ramrod_data *data = |
3337 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); |
3338 | |
3339 | data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | |
3340 | (BNX2X_FILTER_MCAST_PENDING << |
3341 | BNX2X_SWCID_SHIFT)); |
3342 | data->header.rule_cnt = len; |
3343 | } |
3344 | |
3345 | /** |
3346 | * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins |
3347 | * |
3348 | * @bp: device handle |
 * @o: multicast info
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
 *
 * returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
3355 | */ |
3356 | static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, |
3357 | struct bnx2x_mcast_obj *o) |
3358 | { |
3359 | int i, cnt = 0; |
3360 | u64 elem; |
3361 | |
3362 | for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { |
3363 | elem = o->registry.aprox_match.vec[i]; |
3364 | for (; elem; cnt++) |
3365 | elem &= elem - 1; |
3366 | } |
3367 | |
3368 | o->set_registry_size(o, cnt); |
3369 | |
3370 | return 0; |
3371 | } |
3372 | |
3373 | static int bnx2x_mcast_setup_e2(struct bnx2x *bp, |
3374 | struct bnx2x_mcast_ramrod_params *p, |
3375 | enum bnx2x_mcast_cmd cmd) |
3376 | { |
3377 | struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; |
3378 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3379 | struct eth_multicast_rules_ramrod_data *data = |
3380 | (struct eth_multicast_rules_ramrod_data *)(raw->rdata); |
3381 | int cnt = 0, rc; |
3382 | |
3383 | /* Reset the ramrod data buffer */ |
3384 | memset(data, 0, sizeof(*data)); |
3385 | |
3386 | cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); |
3387 | |
3388 | /* If there are no more pending commands - clear SCHEDULED state */ |
	if (list_empty(&o->pending_cmds_head))
3390 | o->clear_sched(o); |
3391 | |
3392 | /* The below may be true iff there was enough room in ramrod |
3393 | * data for all pending commands and for the current |
3394 | * command. Otherwise the current command would have been added |
3395 | * to the pending commands and p->mcast_list_len would have been |
3396 | * zeroed. |
3397 | */ |
3398 | if (p->mcast_list_len > 0) |
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3400 | |
3401 | /* We've pulled out some MACs - update the total number of |
3402 | * outstanding. |
3403 | */ |
3404 | o->total_pending_num -= cnt; |
3405 | |
3406 | /* send a ramrod */ |
3407 | WARN_ON(o->total_pending_num < 0); |
3408 | WARN_ON(cnt > o->max_cmd_len); |
3409 | |
	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3411 | |
3412 | /* Update a registry size if there are no more pending operations. |
3413 | * |
3414 | * We don't want to change the value of the registry size if there are |
3415 | * pending operations because we want it to always be equal to the |
3416 | * exact or the approximate number (see bnx2x_mcast_validate_e2()) of |
3417 | * set bins after the last requested operation in order to properly |
3418 | * evaluate the size of the next DEL/RESTORE operation. |
3419 | * |
3420 | * Note that we update the registry itself during command(s) handling |
3421 | * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we |
3422 | * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but |
3423 | * with a limited amount of update commands (per MAC/bin) and we don't |
3424 | * know in this scope what the actual state of bins configuration is |
3425 | * going to be after this ramrod. |
3426 | */ |
3427 | if (!o->total_pending_num) |
3428 | bnx2x_mcast_refresh_registry_e2(bp, o); |
3429 | |
3430 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear |
	 * RAMROD_PENDING status immediately. Due to the SET option, it's also
3432 | * possible that after evaluating the differences there's no need for |
3433 | * a ramrod. In that case, we can skip it as well. |
3434 | */ |
3435 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) { |
3436 | raw->clear_pending(raw); |
3437 | return 0; |
3438 | } else { |
3439 | /* No need for an explicit memory barrier here as long as we |
3440 | * ensure the ordering of writing to the SPQ element |
3441 | * and updating of the SPQ producer which involves a memory |
3442 | * read. If the memory read is removed we will have to put a |
3443 | * full memory barrier there (inside bnx2x_sp_post()). |
3444 | */ |
3445 | |
3446 | /* Send a ramrod */ |
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
3451 | if (rc) |
3452 | return rc; |
3453 | |
3454 | /* Ramrod completion is pending */ |
3455 | return 1; |
3456 | } |
3457 | } |
3458 | |
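/* 57711: multicasts are configured through an approximate match filter
 * written directly to the chip, so there is no credit to account for -
 * just reject SET and mark DEL/RESTORE as having work to do.
 */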
3459 | static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, |
3460 | struct bnx2x_mcast_ramrod_params *p, |
3461 | enum bnx2x_mcast_cmd cmd) |
3462 | { |
3463 | if (cmd == BNX2X_MCAST_CMD_SET) { |
3464 | BNX2X_ERR("Can't use `set' command on e1h!\n" ); |
3465 | return -EINVAL; |
3466 | } |
3467 | |
	/* Mark that there is work to do */
3469 | if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) |
3470 | p->mcast_list_len = 1; |
3471 | |
3472 | return 0; |
3473 | } |
3474 | |
3475 | static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, |
3476 | struct bnx2x_mcast_ramrod_params *p, |
3477 | int old_num_bins, |
3478 | enum bnx2x_mcast_cmd cmd) |
3479 | { |
3480 | /* Do nothing */ |
3481 | } |
3482 | |
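/* Set bit 'bit' in a multicast hash filter represented as an array of
 * 32-bit words.
 */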
3483 | #define BNX2X_57711_SET_MC_FILTER(filter, bit) \ |
3484 | do { \ |
3485 | (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ |
3486 | } while (0) |
3487 | |
3488 | static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, |
3489 | struct bnx2x_mcast_obj *o, |
3490 | struct bnx2x_mcast_ramrod_params *p, |
3491 | u32 *mc_filter) |
3492 | { |
3493 | struct bnx2x_mcast_list_elem *mlist_pos; |
3494 | int bit; |
3495 | |
3496 | list_for_each_entry(mlist_pos, &p->mcast_list, link) { |
		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3501 | mlist_pos->mac, bit); |
3502 | |
3503 | /* bookkeeping... */ |
3504 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, |
3505 | bit); |
3506 | } |
3507 | } |
3508 | |
3509 | static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, |
3510 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, |
3511 | u32 *mc_filter) |
3512 | { |
3513 | int bit; |
3514 | |
	for (bit = bnx2x_mcast_get_next_bin(o, 0);
	     bit >= 0;
	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3520 | } |
3521 | } |
3522 | |
3523 | /* On 57711 we write the multicast MACs' approximate match |
 * table directly into the TSTORM's internal RAM. So we don't
3525 | * really need to handle any tricks to make it work. |
3526 | */ |
3527 | static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, |
3528 | struct bnx2x_mcast_ramrod_params *p, |
3529 | enum bnx2x_mcast_cmd cmd) |
3530 | { |
3531 | int i; |
3532 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3533 | struct bnx2x_raw_obj *r = &o->raw; |
3534 | |
3535 | /* If CLEAR_ONLY has been requested - clear the registry |
3536 | * and clear a pending bit. |
3537 | */ |
3538 | if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
3539 | u32 mc_filter[MC_HASH_SIZE] = {0}; |
3540 | |
		/* Set the multicast filter bits before writing them into
3542 | * the internal memory. |
3543 | */ |
3544 | switch (cmd) { |
3545 | case BNX2X_MCAST_CMD_ADD: |
3546 | bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); |
3547 | break; |
3548 | |
3549 | case BNX2X_MCAST_CMD_DEL: |
3550 | DP(BNX2X_MSG_SP, |
3551 | "Invalidating multicast MACs configuration\n" ); |
3552 | |
3553 | /* clear the registry */ |
3554 | memset(o->registry.aprox_match.vec, 0, |
3555 | sizeof(o->registry.aprox_match.vec)); |
3556 | break; |
3557 | |
3558 | case BNX2X_MCAST_CMD_RESTORE: |
3559 | bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); |
3560 | break; |
3561 | |
3562 | default: |
3563 | BNX2X_ERR("Unknown command: %d\n" , cmd); |
3564 | return -EINVAL; |
3565 | } |
3566 | |
3567 | /* Set the mcast filter in the internal memory */ |
3568 | for (i = 0; i < MC_HASH_SIZE; i++) |
3569 | REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); |
3570 | } else |
3571 | /* clear the registry */ |
3572 | memset(o->registry.aprox_match.vec, 0, |
3573 | sizeof(o->registry.aprox_match.vec)); |
3574 | |
3575 | /* We are done */ |
3576 | r->clear_pending(r); |
3577 | |
3578 | return 0; |
3579 | } |
3580 | |
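/* 57710 counterpart of bnx2x_mcast_validate_e2(): check the command and
 * update the exact match registry size and pending credit bookkeeping.
 */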
3581 | static int bnx2x_mcast_validate_e1(struct bnx2x *bp, |
3582 | struct bnx2x_mcast_ramrod_params *p, |
3583 | enum bnx2x_mcast_cmd cmd) |
3584 | { |
3585 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3586 | int reg_sz = o->get_registry_size(o); |
3587 | |
3588 | if (cmd == BNX2X_MCAST_CMD_SET) { |
3589 | BNX2X_ERR("Can't use `set' command on e1!\n" ); |
3590 | return -EINVAL; |
3591 | } |
3592 | |
3593 | switch (cmd) { |
3594 | /* DEL command deletes all currently configured MACs */ |
3595 | case BNX2X_MCAST_CMD_DEL: |
3596 | o->set_registry_size(o, 0); |
3597 | fallthrough; |
3598 | |
3599 | /* RESTORE command will restore the entire multicast configuration */ |
3600 | case BNX2X_MCAST_CMD_RESTORE: |
3601 | p->mcast_list_len = reg_sz; |
3602 | DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n" , |
3603 | cmd, p->mcast_list_len); |
3604 | break; |
3605 | |
3606 | case BNX2X_MCAST_CMD_ADD: |
3607 | case BNX2X_MCAST_CMD_CONT: |
3608 | /* Multicast MACs on 57710 are configured as unicast MACs and |
3609 | * there is only a limited number of CAM entries for that |
3610 | * matter. |
3611 | */ |
3612 | if (p->mcast_list_len > o->max_cmd_len) { |
3613 | BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n" , |
3614 | o->max_cmd_len); |
3615 | return -EINVAL; |
3616 | } |
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD command overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3622 | if (p->mcast_list_len > 0) |
3623 | o->set_registry_size(o, p->mcast_list_len); |
3624 | |
3625 | break; |
3626 | |
3627 | default: |
3628 | BNX2X_ERR("Unknown command: %d\n" , cmd); |
3629 | return -EINVAL; |
3630 | } |
3631 | |
3632 | /* We want to ensure that commands are executed one by one for 57710. |
	 * Therefore each non-empty command will consume o->max_cmd_len.
3634 | */ |
3635 | if (p->mcast_list_len) |
3636 | o->total_pending_num += o->max_cmd_len; |
3637 | |
3638 | return 0; |
3639 | } |
3640 | |
3641 | static void bnx2x_mcast_revert_e1(struct bnx2x *bp, |
3642 | struct bnx2x_mcast_ramrod_params *p, |
3643 | int old_num_macs, |
3644 | enum bnx2x_mcast_cmd cmd) |
3645 | { |
3646 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3647 | |
3648 | o->set_registry_size(o, old_num_macs); |
3649 | |
	/* If the current command hasn't been handled yet and we are here, it
	 * means that it is meant to be dropped and we have to update the
	 * number of outstanding MACs accordingly.
3653 | */ |
3654 | if (p->mcast_list_len) |
3655 | o->total_pending_num -= o->max_cmd_len; |
3656 | } |
3657 | |
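/* Write a single exact match CAM entry into the 57710 ramrod data for an
 * ADD/RESTORE rule; for DEL nothing is written since the caller has already
 * invalidated all entries.
 */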
3658 | static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, |
3659 | struct bnx2x_mcast_obj *o, int idx, |
3660 | union bnx2x_mcast_config_data *cfg_data, |
3661 | enum bnx2x_mcast_cmd cmd) |
3662 | { |
3663 | struct bnx2x_raw_obj *r = &o->raw; |
3664 | struct mac_configuration_cmd *data = |
3665 | (struct mac_configuration_cmd *)(r->rdata); |
3666 | |
3667 | /* copy mac */ |
3668 | if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { |
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);
3673 | |
3674 | data->config_table[idx].vlan_id = 0; |
3675 | data->config_table[idx].pf_id = r->func_id; |
3676 | data->config_table[idx].clients_bit_vector = |
3677 | cpu_to_le32(1 << r->cl_id); |
3678 | |
3679 | SET_FLAG(data->config_table[idx].flags, |
3680 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
3681 | T_ETH_MAC_COMMAND_SET); |
3682 | } |
3683 | } |
3684 | |
3685 | /** |
3686 | * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd |
3687 | * |
3688 | * @bp: device handle |
3689 | * @p: ramrod parameters |
3690 | * @len: number of rules to handle |
3691 | */ |
3692 | static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, |
3693 | struct bnx2x_mcast_ramrod_params *p, |
3694 | u8 len) |
3695 | { |
3696 | struct bnx2x_raw_obj *r = &p->mcast_obj->raw; |
3697 | struct mac_configuration_cmd *data = |
3698 | (struct mac_configuration_cmd *)(r->rdata); |
3699 | |
3700 | u8 offset = (CHIP_REV_IS_SLOW(bp) ? |
3701 | BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : |
3702 | BNX2X_MAX_MULTICAST*(1 + r->func_id)); |
3703 | |
3704 | data->hdr.offset = offset; |
3705 | data->hdr.client_id = cpu_to_le16(0xff); |
3706 | data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | |
3707 | (BNX2X_FILTER_MCAST_PENDING << |
3708 | BNX2X_SWCID_SHIFT)); |
3709 | data->hdr.length = len; |
3710 | } |
3711 | |
3712 | /** |
3713 | * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 |
3714 | * |
3715 | * @bp: device handle |
3716 | * @o: multicast info |
3717 | * @start_idx: index in the registry to start from |
3718 | * @rdata_idx: index in the ramrod data to start from |
3719 | * |
 * The restore command for 57710 is like all other commands - always a
 * standalone command - start_idx and rdata_idx will always be 0. This
 * function will always succeed.
 * returns -1 to comply with the 57712 variant.
3724 | */ |
3725 | static inline int bnx2x_mcast_handle_restore_cmd_e1( |
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3727 | int *rdata_idx) |
3728 | { |
3729 | struct bnx2x_mcast_mac_elem *elem; |
3730 | int i = 0; |
3731 | union bnx2x_mcast_config_data cfg_data = {NULL}; |
3732 | |
3733 | /* go through the registry and configure the MACs from it. */ |
3734 | list_for_each_entry(elem, &o->registry.exact_match.macs, link) { |
3735 | cfg_data.mac = &elem->mac[0]; |
3736 | o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); |
3737 | |
3738 | i++; |
3739 | |
3740 | DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n" , |
3741 | cfg_data.mac); |
3742 | } |
3743 | |
3744 | *rdata_idx = i; |
3745 | |
3746 | return -1; |
3747 | } |
3748 | |
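/* Handle the first pending command only (57710 executes commands one at a
 * time) and return the number of ramrod data lines it consumed.
 */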
3749 | static inline int bnx2x_mcast_handle_pending_cmds_e1( |
3750 | struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) |
3751 | { |
3752 | struct bnx2x_pending_mcast_cmd *cmd_pos; |
3753 | struct bnx2x_mcast_mac_elem *pmac_pos; |
3754 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3755 | union bnx2x_mcast_config_data cfg_data = {NULL}; |
3756 | int cnt = 0; |
3757 | |
3758 | /* If nothing to be done - return */ |
	if (list_empty(&o->pending_cmds_head))
3760 | return 0; |
3761 | |
3762 | /* Handle the first command */ |
3763 | cmd_pos = list_first_entry(&o->pending_cmds_head, |
3764 | struct bnx2x_pending_mcast_cmd, link); |
3765 | |
3766 | switch (cmd_pos->type) { |
3767 | case BNX2X_MCAST_CMD_ADD: |
3768 | list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { |
3769 | cfg_data.mac = &pmac_pos->mac[0]; |
3770 | o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); |
3771 | |
3772 | cnt++; |
3773 | |
3774 | DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n" , |
3775 | pmac_pos->mac); |
3776 | } |
3777 | break; |
3778 | |
3779 | case BNX2X_MCAST_CMD_DEL: |
3780 | cnt = cmd_pos->data.macs_num; |
3781 | DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n" , cnt); |
3782 | break; |
3783 | |
3784 | case BNX2X_MCAST_CMD_RESTORE: |
3785 | o->hdl_restore(bp, o, 0, &cnt); |
3786 | break; |
3787 | |
3788 | default: |
3789 | BNX2X_ERR("Unknown command: %d\n" , cmd_pos->type); |
3790 | return -EINVAL; |
3791 | } |
3792 | |
3793 | list_del(entry: &cmd_pos->link); |
3794 | bnx2x_free_groups(mcast_group_list: &cmd_pos->group_head); |
3795 | kfree(objp: cmd_pos); |
3796 | |
3797 | return cnt; |
3798 | } |
3799 | |
3800 | /** |
3801 | * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). |
3802 | * |
3803 | * @fw_hi: address |
3804 | * @fw_mid: address |
3805 | * @fw_lo: address |
3806 | * @mac: mac address |
3807 | */ |
3808 | static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, |
3809 | __le16 *fw_lo, u8 *mac) |
3810 | { |
3811 | mac[1] = ((u8 *)fw_hi)[0]; |
3812 | mac[0] = ((u8 *)fw_hi)[1]; |
3813 | mac[3] = ((u8 *)fw_mid)[0]; |
3814 | mac[2] = ((u8 *)fw_mid)[1]; |
3815 | mac[5] = ((u8 *)fw_lo)[0]; |
3816 | mac[4] = ((u8 *)fw_lo)[1]; |
3817 | } |
3818 | |
3819 | /** |
 * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3821 | * |
3822 | * @bp: device handle |
3823 | * @o: multicast info |
3824 | * |
3825 | * Check the ramrod data first entry flag to see if it's a DELETE or ADD command |
 * and update the registry correspondingly: if ADD - allocate memory and add
3827 | * the entries to the registry (list), if DELETE - clear the registry and free |
3828 | * the memory. |
3829 | */ |
3830 | static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, |
3831 | struct bnx2x_mcast_obj *o) |
3832 | { |
3833 | struct bnx2x_raw_obj *raw = &o->raw; |
3834 | struct bnx2x_mcast_mac_elem *elem; |
3835 | struct mac_configuration_cmd *data = |
3836 | (struct mac_configuration_cmd *)(raw->rdata); |
3837 | |
3838 | /* If first entry contains a SET bit - the command was ADD, |
3839 | * otherwise - DEL_ALL |
3840 | */ |
3841 | if (GET_FLAG(data->config_table[0].flags, |
3842 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { |
3843 | int i, len = data->hdr.length; |
3844 | |
3845 | /* Break if it was a RESTORE command */ |
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3850 | if (!elem) { |
3851 | BNX2X_ERR("Failed to allocate registry memory\n" ); |
3852 | return -ENOMEM; |
3853 | } |
3854 | |
3855 | for (i = 0; i < len; i++, elem++) { |
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
3865 | } |
3866 | } else { |
3867 | elem = list_first_entry(&o->registry.exact_match.macs, |
3868 | struct bnx2x_mcast_mac_elem, link); |
3869 | DP(BNX2X_MSG_SP, "Deleting a registry\n" ); |
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3872 | } |
3873 | |
3874 | return 0; |
3875 | } |
3876 | |
3877 | static int bnx2x_mcast_setup_e1(struct bnx2x *bp, |
3878 | struct bnx2x_mcast_ramrod_params *p, |
3879 | enum bnx2x_mcast_cmd cmd) |
3880 | { |
3881 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3882 | struct bnx2x_raw_obj *raw = &o->raw; |
3883 | struct mac_configuration_cmd *data = |
3884 | (struct mac_configuration_cmd *)(raw->rdata); |
3885 | int cnt = 0, i, rc; |
3886 | |
3887 | /* Reset the ramrod data buffer */ |
3888 | memset(data, 0, sizeof(*data)); |
3889 | |
3890 | /* First set all entries as invalid */ |
	for (i = 0; i < o->max_cmd_len; i++)
3892 | SET_FLAG(data->config_table[i].flags, |
3893 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
3894 | T_ETH_MAC_COMMAND_INVALIDATE); |
3895 | |
3896 | /* Handle pending commands first */ |
3897 | cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); |
3898 | |
3899 | /* If there are no more pending commands - clear SCHEDULED state */ |
	if (list_empty(&o->pending_cmds_head))
3901 | o->clear_sched(o); |
3902 | |
3903 | /* The below may be true iff there were no pending commands */ |
3904 | if (!cnt) |
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3906 | |
3907 | /* For 57710 every command has o->max_cmd_len length to ensure that |
3908 | * commands are done one at a time. |
3909 | */ |
3910 | o->total_pending_num -= o->max_cmd_len; |
3911 | |
3912 | /* send a ramrod */ |
3913 | |
3914 | WARN_ON(cnt > o->max_cmd_len); |
3915 | |
3916 | /* Set ramrod header (in particular, a number of entries to update) */ |
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3918 | |
	/* update the registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we send one command at a time,
	 * hence we may take the registry update out of the command handling
3923 | * and do it in a simpler way here. |
3924 | */ |
3925 | rc = bnx2x_mcast_refresh_registry_e1(bp, o); |
3926 | if (rc) |
3927 | return rc; |
3928 | |
3929 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear |
3930 | * RAMROD_PENDING status immediately. |
3931 | */ |
3932 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
3933 | raw->clear_pending(raw); |
3934 | return 0; |
3935 | } else { |
3936 | /* No need for an explicit memory barrier here as long as we |
3937 | * ensure the ordering of writing to the SPQ element |
3938 | * and updating of the SPQ producer which involves a memory |
3939 | * read. If the memory read is removed we will have to put a |
3940 | * full memory barrier there (inside bnx2x_sp_post()). |
3941 | */ |
3942 | |
3943 | /* Send a ramrod */ |
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
3948 | if (rc) |
3949 | return rc; |
3950 | |
3951 | /* Ramrod completion is pending */ |
3952 | return 1; |
3953 | } |
3954 | } |
3955 | |
3956 | static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) |
3957 | { |
3958 | return o->registry.exact_match.num_macs_set; |
3959 | } |
3960 | |
3961 | static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) |
3962 | { |
3963 | return o->registry.aprox_match.num_bins_set; |
3964 | } |
3965 | |
3966 | static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, |
3967 | int n) |
3968 | { |
3969 | o->registry.exact_match.num_macs_set = n; |
3970 | } |
3971 | |
3972 | static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, |
3973 | int n) |
3974 | { |
3975 | o->registry.aprox_match.num_bins_set = n; |
3976 | } |
3977 | |
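/**
 * bnx2x_config_mcast - configure a multicast MACs list
 *
 * @bp: device handle
 * @p: ramrod parameters
 * @cmd: command to execute (ADD, DEL, RESTORE, CONT, ...)
 *
 * If a previous command is still pending, or if the request is too big to be
 * handled by a single ramrod, the command is enqueued to the pending commands
 * list. Returns 0 when the operation has completed, 1 when a ramrod completion
 * is still pending and a negative value on failure.
 */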
3978 | int bnx2x_config_mcast(struct bnx2x *bp, |
3979 | struct bnx2x_mcast_ramrod_params *p, |
3980 | enum bnx2x_mcast_cmd cmd) |
3981 | { |
3982 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3983 | struct bnx2x_raw_obj *r = &o->raw; |
3984 | int rc = 0, old_reg_size; |
3985 | |
	/* This is needed to recover the number of currently configured mcast macs
3987 | * in case of failure. |
3988 | */ |
3989 | old_reg_size = o->get_registry_size(o); |
3990 | |
3991 | /* Do some calculations and checks */ |
3992 | rc = o->validate(bp, p, cmd); |
3993 | if (rc) |
3994 | return rc; |
3995 | |
3996 | /* Return if there is no work to do */ |
3997 | if ((!p->mcast_list_len) && (!o->check_sched(o))) |
3998 | return 0; |
3999 | |
4000 | DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n" , |
4001 | o->total_pending_num, p->mcast_list_len, o->max_cmd_len); |
4002 | |
4003 | /* Enqueue the current command to the pending list if we can't complete |
4004 | * it in the current iteration |
4005 | */ |
4006 | if (r->check_pending(r) || |
4007 | ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { |
4008 | rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); |
4009 | if (rc < 0) |
4010 | goto error_exit1; |
4011 | |
4012 | /* As long as the current command is in a command list we |
4013 | * don't need to handle it separately. |
4014 | */ |
4015 | p->mcast_list_len = 0; |
4016 | } |
4017 | |
4018 | if (!r->check_pending(r)) { |
4019 | |
4020 | /* Set 'pending' state */ |
4021 | r->set_pending(r); |
4022 | |
4023 | /* Configure the new classification in the chip */ |
4024 | rc = o->config_mcast(bp, p, cmd); |
4025 | if (rc < 0) |
4026 | goto error_exit2; |
4027 | |
4028 | /* Wait for a ramrod completion if was requested */ |
4029 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) |
4030 | rc = o->wait_comp(bp, o); |
4031 | } |
4032 | |
4033 | return rc; |
4034 | |
4035 | error_exit2: |
4036 | r->clear_pending(r); |
4037 | |
4038 | error_exit1: |
4039 | o->revert(bp, p, old_reg_size, cmd); |
4040 | |
4041 | return rc; |
4042 | } |
4043 | |
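/* The "scheduled" bit in o->raw.pstate marks that there are commands waiting
 * on the pending commands list of the multicast object.
 */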
4044 | static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) |
4045 | { |
4046 | smp_mb__before_atomic(); |
	clear_bit(o->sched_state, o->raw.pstate);
4048 | smp_mb__after_atomic(); |
4049 | } |
4050 | |
4051 | static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) |
4052 | { |
4053 | smp_mb__before_atomic(); |
	set_bit(o->sched_state, o->raw.pstate);
4055 | smp_mb__after_atomic(); |
4056 | } |
4057 | |
4058 | static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) |
4059 | { |
4060 | return !!test_bit(o->sched_state, o->raw.pstate); |
4061 | } |
4062 | |
4063 | static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) |
4064 | { |
4065 | return o->raw.check_pending(&o->raw) || o->check_sched(o); |
4066 | } |
4067 | |
4068 | void bnx2x_init_mcast_obj(struct bnx2x *bp, |
4069 | struct bnx2x_mcast_obj *mcast_obj, |
4070 | u8 mcast_cl_id, u32 mcast_cid, u8 func_id, |
4071 | u8 engine_id, void *rdata, dma_addr_t rdata_mapping, |
4072 | int state, unsigned long *pstate, bnx2x_obj_type type) |
4073 | { |
4074 | memset(mcast_obj, 0, sizeof(*mcast_obj)); |
4075 | |
	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
4077 | rdata, rdata_mapping, state, pstate, type); |
4078 | |
4079 | mcast_obj->engine_id = engine_id; |
4080 | |
	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
4082 | |
4083 | mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; |
4084 | mcast_obj->check_sched = bnx2x_mcast_check_sched; |
4085 | mcast_obj->set_sched = bnx2x_mcast_set_sched; |
4086 | mcast_obj->clear_sched = bnx2x_mcast_clear_sched; |
4087 | |
4088 | if (CHIP_IS_E1(bp)) { |
4089 | mcast_obj->config_mcast = bnx2x_mcast_setup_e1; |
4090 | mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; |
4091 | mcast_obj->hdl_restore = |
4092 | bnx2x_mcast_handle_restore_cmd_e1; |
4093 | mcast_obj->check_pending = bnx2x_mcast_check_pending; |
4094 | |
4095 | if (CHIP_REV_IS_SLOW(bp)) |
4096 | mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; |
4097 | else |
4098 | mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; |
4099 | |
4100 | mcast_obj->wait_comp = bnx2x_mcast_wait; |
4101 | mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; |
4102 | mcast_obj->validate = bnx2x_mcast_validate_e1; |
4103 | mcast_obj->revert = bnx2x_mcast_revert_e1; |
4104 | mcast_obj->get_registry_size = |
4105 | bnx2x_mcast_get_registry_size_exact; |
4106 | mcast_obj->set_registry_size = |
4107 | bnx2x_mcast_set_registry_size_exact; |
4108 | |
4109 | /* 57710 is the only chip that uses the exact match for mcast |
4110 | * at the moment. |
4111 | */ |
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
4113 | |
4114 | } else if (CHIP_IS_E1H(bp)) { |
4115 | mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; |
4116 | mcast_obj->enqueue_cmd = NULL; |
4117 | mcast_obj->hdl_restore = NULL; |
4118 | mcast_obj->check_pending = bnx2x_mcast_check_pending; |
4119 | |
4120 | /* 57711 doesn't send a ramrod, so it has unlimited credit |
4121 | * for one command. |
4122 | */ |
4123 | mcast_obj->max_cmd_len = -1; |
4124 | mcast_obj->wait_comp = bnx2x_mcast_wait; |
4125 | mcast_obj->set_one_rule = NULL; |
4126 | mcast_obj->validate = bnx2x_mcast_validate_e1h; |
4127 | mcast_obj->revert = bnx2x_mcast_revert_e1h; |
4128 | mcast_obj->get_registry_size = |
4129 | bnx2x_mcast_get_registry_size_aprox; |
4130 | mcast_obj->set_registry_size = |
4131 | bnx2x_mcast_set_registry_size_aprox; |
4132 | } else { |
4133 | mcast_obj->config_mcast = bnx2x_mcast_setup_e2; |
4134 | mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; |
4135 | mcast_obj->hdl_restore = |
4136 | bnx2x_mcast_handle_restore_cmd_e2; |
4137 | mcast_obj->check_pending = bnx2x_mcast_check_pending; |
4138 | /* TODO: There should be a proper HSI define for this number!!! |
4139 | */ |
4140 | mcast_obj->max_cmd_len = 16; |
4141 | mcast_obj->wait_comp = bnx2x_mcast_wait; |
4142 | mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; |
4143 | mcast_obj->validate = bnx2x_mcast_validate_e2; |
4144 | mcast_obj->revert = bnx2x_mcast_revert_e2; |
4145 | mcast_obj->get_registry_size = |
4146 | bnx2x_mcast_get_registry_size_aprox; |
4147 | mcast_obj->set_registry_size = |
4148 | bnx2x_mcast_set_registry_size_aprox; |
4149 | } |
4150 | } |
4151 | |
4152 | /*************************** Credit handling **********************************/ |
4153 | |
4154 | /** |
4155 | * __atomic_add_ifless - add if the result is less than a given value. |
4156 | * |
4157 | * @v: pointer of type atomic_t |
4158 | * @a: the amount to add to v... |
4159 | * @u: ...if (v + a) is less than u. |
4160 | * |
4161 | * returns true if (v + a) was less than u, and false otherwise. |
4162 | * |
4163 | */ |
4164 | static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) |
4165 | { |
4166 | int c, old; |
4167 | |
4168 | c = atomic_read(v); |
4169 | for (;;) { |
4170 | if (unlikely(c + a >= u)) |
4171 | return false; |
4172 | |
		old = atomic_cmpxchg((v), c, c + a);
4174 | if (likely(old == c)) |
4175 | break; |
4176 | c = old; |
4177 | } |
4178 | |
4179 | return true; |
4180 | } |
4181 | |
4182 | /** |
 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
 *
 * @v: pointer of type atomic_t
 * @a: the amount to dec from v...
 * @u: ...if (v - a) is greater than or equal to u.
 *
 * returns true if (v - a) was greater than or equal to u, and false
 * otherwise.
4191 | */ |
4192 | static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) |
4193 | { |
4194 | int c, old; |
4195 | |
4196 | c = atomic_read(v); |
4197 | for (;;) { |
4198 | if (unlikely(c - a < u)) |
4199 | return false; |
4200 | |
		old = atomic_cmpxchg((v), c, c - a);
4202 | if (likely(old == c)) |
4203 | break; |
4204 | c = old; |
4205 | } |
4206 | |
4207 | return true; |
4208 | } |
4209 | |
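/* Take 'cnt' credits from the pool; returns false if the pool does not hold
 * enough credit. The put counterpart below returns credit and refuses to
 * overflow the pool size.
 */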
4210 | static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) |
4211 | { |
4212 | bool rc; |
4213 | |
4214 | smp_mb(); |
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4216 | smp_mb(); |
4217 | |
4218 | return rc; |
4219 | } |
4220 | |
4221 | static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) |
4222 | { |
4223 | bool rc; |
4224 | |
4225 | smp_mb(); |
4226 | |
	/* Don't allow a refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4229 | |
4230 | smp_mb(); |
4231 | |
4232 | return rc; |
4233 | } |
4234 | |
4235 | static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) |
4236 | { |
4237 | int cur_credit; |
4238 | |
4239 | smp_mb(); |
	cur_credit = atomic_read(&o->credit);
4241 | |
4242 | return cur_credit; |
4243 | } |
4244 | |
4245 | static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, |
4246 | int cnt) |
4247 | { |
4248 | return true; |
4249 | } |
4250 | |
4251 | static bool bnx2x_credit_pool_get_entry( |
4252 | struct bnx2x_credit_pool_obj *o, |
4253 | int *offset) |
4254 | { |
4255 | int idx, vec, i; |
4256 | |
4257 | *offset = -1; |
4258 | |
4259 | /* Find "internal cam-offset" then add to base for this object... */ |
4260 | for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { |
4261 | |
4262 | /* Skip the current vector if there are no free entries in it */ |
4263 | if (!o->pool_mirror[vec]) |
4264 | continue; |
4265 | |
4266 | /* If we've got here we are going to find a free entry */ |
4267 | for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; |
4268 | i < BIT_VEC64_ELEM_SZ; idx++, i++) |
4269 | |
4270 | if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { |
4271 | /* Got one!! */ |
4272 | BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); |
4273 | *offset = o->base_pool_offset + idx; |
4274 | return true; |
4275 | } |
4276 | } |
4277 | |
4278 | return false; |
4279 | } |
4280 | |
4281 | static bool bnx2x_credit_pool_put_entry( |
4282 | struct bnx2x_credit_pool_obj *o, |
4283 | int offset) |
4284 | { |
4285 | if (offset < o->base_pool_offset) |
4286 | return false; |
4287 | |
4288 | offset -= o->base_pool_offset; |
4289 | |
4290 | if (offset >= o->pool_sz) |
4291 | return false; |
4292 | |
4293 | /* Return the entry to the pool */ |
4294 | BIT_VEC64_SET_BIT(o->pool_mirror, offset); |
4295 | |
4296 | return true; |
4297 | } |
4298 | |
4299 | static bool bnx2x_credit_pool_put_entry_always_true( |
4300 | struct bnx2x_credit_pool_obj *o, |
4301 | int offset) |
4302 | { |
4303 | return true; |
4304 | } |
4305 | |
4306 | static bool bnx2x_credit_pool_get_entry_always_true( |
4307 | struct bnx2x_credit_pool_obj *o, |
4308 | int *offset) |
4309 | { |
4310 | *offset = -1; |
4311 | return true; |
4312 | } |
4313 | /** |
4314 | * bnx2x_init_credit_pool - initialize credit pool internals. |
4315 | * |
4316 | * @p: credit pool |
4317 | * @base: Base entry in the CAM to use. |
4318 | * @credit: pool size. |
4319 | * |
4320 | * If base is negative no CAM entries handling will be performed. |
4321 | * If credit is negative pool operations will always succeed (unlimited pool). |
4322 | * |
4323 | */ |
4324 | void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, |
4325 | int base, int credit) |
4326 | { |
4327 | /* Zero the object first */ |
4328 | memset(p, 0, sizeof(*p)); |
4329 | |
4330 | /* Set the table to all 1s */ |
4331 | memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); |
4332 | |
4333 | /* Init a pool as full */ |
	atomic_set(&p->credit, credit);

	/* The total pool size */
4337 | p->pool_sz = credit; |
4338 | |
4339 | p->base_pool_offset = base; |
4340 | |
4341 | /* Commit the change */ |
4342 | smp_mb(); |
4343 | |
4344 | p->check = bnx2x_credit_pool_check; |
4345 | |
4346 | /* if pool credit is negative - disable the checks */ |
4347 | if (credit >= 0) { |
4348 | p->put = bnx2x_credit_pool_put; |
4349 | p->get = bnx2x_credit_pool_get; |
4350 | p->put_entry = bnx2x_credit_pool_put_entry; |
4351 | p->get_entry = bnx2x_credit_pool_get_entry; |
4352 | } else { |
4353 | p->put = bnx2x_credit_pool_always_true; |
4354 | p->get = bnx2x_credit_pool_always_true; |
4355 | p->put_entry = bnx2x_credit_pool_put_entry_always_true; |
4356 | p->get_entry = bnx2x_credit_pool_get_entry_always_true; |
4357 | } |
4358 | |
4359 | /* If base is negative - disable entries handling */ |
4360 | if (base < 0) { |
4361 | p->put_entry = bnx2x_credit_pool_put_entry_always_true; |
4362 | p->get_entry = bnx2x_credit_pool_get_entry_always_true; |
4363 | } |
4364 | } |
4365 | |
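/* Split the MAC CAM credit between the active functions according to the
 * chip type; for 57712 and newer no CAM entry offsets are managed by the
 * driver (base is -1).
 */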
4366 | void bnx2x_init_mac_credit_pool(struct bnx2x *bp, |
4367 | struct bnx2x_credit_pool_obj *p, u8 func_id, |
4368 | u8 func_num) |
4369 | { |
4370 | /* TODO: this will be defined in consts as well... */ |
4371 | #define BNX2X_CAM_SIZE_EMUL 5 |
4372 | |
4373 | int cam_sz; |
4374 | |
4375 | if (CHIP_IS_E1(bp)) { |
4376 | /* In E1, Multicast is saved in cam... */ |
4377 | if (!CHIP_REV_IS_SLOW(bp)) |
4378 | cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; |
4379 | else |
4380 | cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; |
4381 | |
		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4383 | |
4384 | } else if (CHIP_IS_E1H(bp)) { |
		/* CAM credit is equally divided between all active functions
		 * on the PORT!
		 */
		if (func_num > 0) {
4389 | if (!CHIP_REV_IS_SLOW(bp)) |
4390 | cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); |
4391 | else |
4392 | cam_sz = BNX2X_CAM_SIZE_EMUL; |
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4394 | } else { |
4395 | /* this should never happen! Block MAC operations. */ |
			bnx2x_init_credit_pool(p, 0, 0);
4397 | } |
4398 | |
4399 | } else { |
4400 | |
		/* CAM credit is equally divided between all active functions
4402 | * on the PATH. |
4403 | */ |
4404 | if (func_num > 0) { |
4405 | if (!CHIP_REV_IS_SLOW(bp)) |
4406 | cam_sz = PF_MAC_CREDIT_E2(bp, func_num); |
4407 | else |
4408 | cam_sz = BNX2X_CAM_SIZE_EMUL; |
4409 | |
4410 | /* No need for CAM entries handling for 57712 and |
4411 | * newer. |
4412 | */ |
			bnx2x_init_credit_pool(p, -1, cam_sz);
4414 | } else { |
4415 | /* this should never happen! Block MAC operations. */ |
			bnx2x_init_credit_pool(p, 0, 0);
4417 | } |
4418 | } |
4419 | } |
4420 | |
4421 | void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, |
4422 | struct bnx2x_credit_pool_obj *p, |
4423 | u8 func_id, |
4424 | u8 func_num) |
4425 | { |
4426 | if (CHIP_IS_E1x(bp)) { |
		/* There is no VLAN credit in HW on 57710 and 57711; only
		 * MAC / MAC-VLAN can be set
4429 | */ |
		bnx2x_init_credit_pool(p, 0, -1);
4431 | } else { |
4432 | /* CAM credit is equally divided between all active functions |
4433 | * on the PATH. |
4434 | */ |
4435 | if (func_num > 0) { |
4436 | int credit = PF_VLAN_CREDIT_E2(bp, func_num); |
4437 | |
			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
4439 | } else |
4440 | /* this should never happen! Block VLAN operations. */ |
			bnx2x_init_credit_pool(p, 0, 0);
4442 | } |
4443 | } |
4444 | |
4445 | /****************** RSS Configuration ******************/ |
4446 | /** |
4447 | * bnx2x_debug_print_ind_table - prints the indirection table configuration. |
4448 | * |
4449 | * @bp: driver handle |
4450 | * @p: pointer to rss configuration |
4451 | * |
4452 | * Prints it when NETIF_MSG_IFUP debug level is configured. |
4453 | */ |
4454 | static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, |
4455 | struct bnx2x_config_rss_params *p) |
4456 | { |
4457 | int i; |
4458 | |
4459 | DP(BNX2X_MSG_SP, "Setting indirection table to:\n" ); |
4460 | DP(BNX2X_MSG_SP, "0x0000: " ); |
4461 | for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { |
4462 | DP_CONT(BNX2X_MSG_SP, "0x%02x " , p->ind_table[i]); |
4463 | |
4464 | /* Print 4 bytes in a line */ |
4465 | if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && |
4466 | (((i + 1) & 0x3) == 0)) { |
4467 | DP_CONT(BNX2X_MSG_SP, "\n" ); |
4468 | DP(BNX2X_MSG_SP, "0x%04x: " , i + 1); |
4469 | } |
4470 | } |
4471 | |
4472 | DP_CONT(BNX2X_MSG_SP, "\n" ); |
4473 | } |
4474 | |
4475 | /** |
4476 | * bnx2x_setup_rss - configure RSS |
4477 | * |
4478 | * @bp: device handle |
4479 | * @p: rss configuration |
4480 | * |
 * sends an UPDATE ramrod for that matter.
4482 | */ |
static int bnx2x_setup_rss(struct bnx2x *bp,
4484 | struct bnx2x_config_rss_params *p) |
4485 | { |
4486 | struct bnx2x_rss_config_obj *o = p->rss_obj; |
4487 | struct bnx2x_raw_obj *r = &o->raw; |
4488 | struct eth_rss_update_ramrod_data *data = |
4489 | (struct eth_rss_update_ramrod_data *)(r->rdata); |
4490 | u16 caps = 0; |
	u8 rss_mode = 0;
4492 | int rc; |
4493 | |
4494 | memset(data, 0, sizeof(*data)); |
4495 | |
4496 | DP(BNX2X_MSG_SP, "Configuring RSS\n" ); |
4497 | |
4498 | /* Set an echo field */ |
4499 | data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | |
4500 | (r->state << BNX2X_SWCID_SHIFT)); |
4501 | |
4502 | /* RSS mode */ |
4503 | if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) |
4504 | rss_mode = ETH_RSS_MODE_DISABLED; |
4505 | else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) |
4506 | rss_mode = ETH_RSS_MODE_REGULAR; |
4507 | |
4508 | data->rss_mode = rss_mode; |
4509 | |
4510 | DP(BNX2X_MSG_SP, "rss_mode=%d\n" , rss_mode); |
4511 | |
4512 | /* RSS capabilities */ |
4513 | if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) |
4514 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; |
4515 | |
4516 | if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) |
4517 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; |
4518 | |
4519 | if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) |
4520 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; |
4521 | |
4522 | if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) |
4523 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; |
4524 | |
4525 | if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) |
4526 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; |
4527 | |
4528 | if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) |
4529 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; |
4530 | |
4531 | if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags)) |
4532 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY; |
4533 | |
4534 | if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags)) |
4535 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY; |
4536 | |
4537 | if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags)) |
4538 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY; |
4539 | |
4540 | /* RSS keys */ |
4541 | if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { |
4542 | u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key); |
4543 | const u8 *src = (const u8 *)p->rss_key; |
4544 | int i; |
4545 | |
4546 | /* Apparently, bnx2x reads this array in reverse order |
4547 | * We need to byte swap rss_key to comply with Toeplitz specs. |
4548 | */ |
4549 | for (i = 0; i < sizeof(data->rss_key); i++) |
4550 | *--dst = *src++; |
4551 | |
4552 | caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; |
4553 | } |
4554 | |
4555 | data->capabilities = cpu_to_le16(caps); |
4556 | |
4557 | /* Hashing mask */ |
4558 | data->rss_result_mask = p->rss_result_mask; |
4559 | |
4560 | /* RSS engine ID */ |
4561 | data->rss_engine_id = o->engine_id; |
4562 | |
4563 | DP(BNX2X_MSG_SP, "rss_engine_id=%d\n" , data->rss_engine_id); |
4564 | |
4565 | /* Indirection table */ |
4566 | memcpy(data->indirection_table, p->ind_table, |
4567 | T_ETH_INDIRECTION_TABLE_SIZE); |
4568 | |
4569 | /* Remember the last configuration */ |
4570 | memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); |
4571 | |
4572 | /* Print the indirection table */ |
4573 | if (netif_msg_ifup(bp)) |
4574 | bnx2x_debug_print_ind_table(bp, p); |
4575 | |
4576 | /* No need for an explicit memory barrier here as long as we |
4577 | * ensure the ordering of writing to the SPQ element |
4578 | * and updating of the SPQ producer which involves a memory |
4579 | * read. If the memory read is removed we will have to put a |
4580 | * full memory barrier there (inside bnx2x_sp_post()). |
4581 | */ |
4582 | |
4583 | /* Send a ramrod */ |
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);
4588 | |
4589 | if (rc < 0) |
4590 | return rc; |
4591 | |
4592 | return 1; |
4593 | } |
4594 | |
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
4597 | { |
4598 | memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); |
4599 | } |
4600 | |
int bnx2x_config_rss(struct bnx2x *bp,
4602 | struct bnx2x_config_rss_params *p) |
4603 | { |
4604 | int rc; |
4605 | struct bnx2x_rss_config_obj *o = p->rss_obj; |
4606 | struct bnx2x_raw_obj *r = &o->raw; |
4607 | |
4608 | /* Do nothing if only driver cleanup was requested */ |
4609 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
4610 | DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n" , |
4611 | p->ramrod_flags); |
4612 | return 0; |
4613 | } |
4614 | |
4615 | r->set_pending(r); |
4616 | |
4617 | rc = o->config_rss(bp, p); |
4618 | if (rc < 0) { |
4619 | r->clear_pending(r); |
4620 | return rc; |
4621 | } |
4622 | |
4623 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) |
4624 | rc = r->wait_comp(bp, r); |
4625 | |
4626 | return rc; |
4627 | } |
4628 | |
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
4631 | u8 cl_id, u32 cid, u8 func_id, u8 engine_id, |
4632 | void *rdata, dma_addr_t rdata_mapping, |
4633 | int state, unsigned long *pstate, |
4634 | bnx2x_obj_type type) |
4635 | { |
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4637 | rdata_mapping, state, pstate, type); |
4638 | |
4639 | rss_obj->engine_id = engine_id; |
4640 | rss_obj->config_rss = bnx2x_setup_rss; |
4641 | } |
4642 | |
4643 | /********************** Queue state object ***********************************/ |
4644 | |
4645 | /** |
4646 | * bnx2x_queue_state_change - perform Queue state change transition |
4647 | * |
4648 | * @bp: device handle |
4649 | * @params: parameters to perform the transition |
4650 | * |
4651 | * returns 0 in case of successfully completed transition, negative error |
 * code in case of failure, positive (EBUSY) value if there is a completion
 * that is still pending (possible only if RAMROD_COMP_WAIT is
4654 | * not set in params->ramrod_flags for asynchronous commands). |
4655 | * |
4656 | */ |
4657 | int bnx2x_queue_state_change(struct bnx2x *bp, |
4658 | struct bnx2x_queue_state_params *params) |
4659 | { |
4660 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
4661 | int rc, pending_bit; |
4662 | unsigned long *pending = &o->pending; |
4663 | |
4664 | /* Check that the requested transition is legal */ |
4665 | rc = o->check_transition(bp, o, params); |
4666 | if (rc) { |
4667 | BNX2X_ERR("check transition returned an error. rc %d\n" , rc); |
4668 | return -EINVAL; |
4669 | } |
4670 | |
4671 | /* Set "pending" bit */ |
4672 | DP(BNX2X_MSG_SP, "pending bit was=%lx\n" , o->pending); |
4673 | pending_bit = o->set_pending(o, params); |
4674 | DP(BNX2X_MSG_SP, "pending bit now=%lx\n" , o->pending); |
4675 | |
4676 | /* Don't send a command if only driver cleanup was requested */ |
4677 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) |
4678 | o->complete_cmd(bp, o, pending_bit); |
4679 | else { |
4680 | /* Send a ramrod */ |
4681 | rc = o->send_cmd(bp, params); |
4682 | if (rc) { |
4683 | o->next_state = BNX2X_Q_STATE_MAX; |
			clear_bit(pending_bit, pending);
4685 | smp_mb__after_atomic(); |
4686 | return rc; |
4687 | } |
4688 | |
4689 | if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { |
4690 | rc = o->wait_comp(bp, o, pending_bit); |
4691 | if (rc) |
4692 | return rc; |
4693 | |
4694 | return 0; |
4695 | } |
4696 | } |
4697 | |
4698 | return !!test_bit(pending_bit, pending); |
4699 | } |
4700 | |
4701 | static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, |
4702 | struct bnx2x_queue_state_params *params) |
4703 | { |
4704 | enum bnx2x_queue_cmd cmd = params->cmd, bit; |
4705 | |
4706 | /* ACTIVATE and DEACTIVATE commands are implemented on top of |
4707 | * UPDATE command. |
4708 | */ |
4709 | if ((cmd == BNX2X_Q_CMD_ACTIVATE) || |
4710 | (cmd == BNX2X_Q_CMD_DEACTIVATE)) |
4711 | bit = BNX2X_Q_CMD_UPDATE; |
4712 | else |
4713 | bit = cmd; |
4714 | |
	set_bit(bit, &obj->pending);
4716 | return bit; |
4717 | } |
4718 | |
4719 | static int bnx2x_queue_wait_comp(struct bnx2x *bp, |
4720 | struct bnx2x_queue_sp_obj *o, |
4721 | enum bnx2x_queue_cmd cmd) |
4722 | { |
	return bnx2x_state_wait(bp, cmd, &o->pending);
4724 | } |
4725 | |
4726 | /** |
4727 | * bnx2x_queue_comp_cmd - complete the state change command. |
4728 | * |
4729 | * @bp: device handle |
4730 | * @o: queue info |
4731 | * @cmd: command to exec |
4732 | * |
4733 | * Checks that the arrived completion is expected. |
4734 | */ |
4735 | static int bnx2x_queue_comp_cmd(struct bnx2x *bp, |
4736 | struct bnx2x_queue_sp_obj *o, |
4737 | enum bnx2x_queue_cmd cmd) |
4738 | { |
4739 | unsigned long cur_pending = o->pending; |
4740 | |
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4743 | cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], |
4744 | o->state, cur_pending, o->next_state); |
4745 | return -EINVAL; |
4746 | } |
4747 | |
4748 | if (o->next_tx_only >= o->max_cos) |
4749 | /* >= because tx only must always be smaller than cos since the |
4750 | * primary connection supports COS 0 |
4751 | */ |
4752 | BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d" , |
4753 | o->next_tx_only, o->max_cos); |
4754 | |
4755 | DP(BNX2X_MSG_SP, |
4756 | "Completing command %d for queue %d, setting state to %d\n" , |
4757 | cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); |
4758 | |
4759 | if (o->next_tx_only) /* print num tx-only if any exist */ |
4760 | DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n" , |
4761 | o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); |
4762 | |
4763 | o->state = o->next_state; |
4764 | o->num_tx_only = o->next_tx_only; |
4765 | o->next_state = BNX2X_Q_STATE_MAX; |
4766 | |
4767 | /* It's important that o->state and o->next_state are |
4768 | * updated before o->pending. |
4769 | */ |
4770 | wmb(); |
4771 | |
	clear_bit(cmd, &o->pending);
4773 | smp_mb__after_atomic(); |
4774 | |
4775 | return 0; |
4776 | } |
4777 | |
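/* Fill the E2-and-newer specific part of the CLIENT_SETUP ramrod data */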
4778 | static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, |
4779 | struct bnx2x_queue_state_params *cmd_params, |
4780 | struct client_init_ramrod_data *data) |
4781 | { |
4782 | struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; |
4783 | |
4784 | /* Rx data */ |
4785 | |
4786 | /* IPv6 TPA supported for E2 and above only */ |
4787 | data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) * |
4788 | CLIENT_INIT_RX_DATA_TPA_EN_IPV6; |
4789 | } |
4790 | |
4791 | static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, |
4792 | struct bnx2x_queue_sp_obj *o, |
4793 | struct bnx2x_general_setup_params *params, |
4794 | struct client_init_general_data *gen_data, |
4795 | unsigned long *flags) |
4796 | { |
4797 | gen_data->client_id = o->cl_id; |
4798 | |
4799 | if (test_bit(BNX2X_Q_FLG_STATS, flags)) { |
4800 | gen_data->statistics_counter_id = |
4801 | params->stat_id; |
4802 | gen_data->statistics_en_flg = 1; |
4803 | gen_data->statistics_zero_flg = |
4804 | test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); |
4805 | } else |
4806 | gen_data->statistics_counter_id = |
4807 | DISABLE_STATISTIC_COUNTER_ID_VALUE; |
4808 | |
4809 | gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); |
4810 | gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); |
4811 | gen_data->sp_client_id = params->spcl_id; |
4812 | gen_data->mtu = cpu_to_le16(params->mtu); |
4813 | gen_data->func_id = o->func_id; |
4814 | |
4815 | gen_data->cos = params->cos; |
4816 | |
4817 | gen_data->traffic_type = |
4818 | test_bit(BNX2X_Q_FLG_FCOE, flags) ? |
4819 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; |
4820 | |
4821 | gen_data->fp_hsi_ver = params->fp_hsi; |
4822 | |
4823 | DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n" , |
4824 | gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); |
4825 | } |
4826 | |
4827 | static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, |
4828 | struct bnx2x_txq_setup_params *params, |
4829 | struct client_init_tx_data *tx_data, |
4830 | unsigned long *flags) |
4831 | { |
4832 | tx_data->enforce_security_flg = |
4833 | test_bit(BNX2X_Q_FLG_TX_SEC, flags); |
4834 | tx_data->default_vlan = |
4835 | cpu_to_le16(params->default_vlan); |
4836 | tx_data->default_vlan_flg = |
4837 | test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); |
4838 | tx_data->tx_switching_flg = |
4839 | test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); |
4840 | tx_data->anti_spoofing_flg = |
4841 | test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); |
4842 | tx_data->force_default_pri_flg = |
4843 | test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); |
4844 | tx_data->refuse_outband_vlan_flg = |
4845 | test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags); |
4846 | tx_data->tunnel_lso_inc_ip_id = |
4847 | test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); |
4848 | tx_data->tunnel_non_lso_pcsum_location = |
4849 | test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : |
4850 | CSUM_ON_BD; |
4851 | |
4852 | tx_data->tx_status_block_id = params->fw_sb_id; |
4853 | tx_data->tx_sb_index_number = params->sb_cq_index; |
4854 | tx_data->tss_leading_client_id = params->tss_leading_cl_id; |
4855 | |
4856 | tx_data->tx_bd_page_base.lo = |
4857 | cpu_to_le32(U64_LO(params->dscr_map)); |
4858 | tx_data->tx_bd_page_base.hi = |
4859 | cpu_to_le32(U64_HI(params->dscr_map)); |
4860 | |
4861 | /* Don't configure any Tx switching mode during queue SETUP */ |
4862 | tx_data->state = 0; |
4863 | } |
4864 | |
4865 | static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, |
4866 | struct rxq_pause_params *params, |
4867 | struct client_init_rx_data *rx_data) |
4868 | { |
4869 | /* flow control data */ |
4870 | rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); |
4871 | rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); |
4872 | rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); |
4873 | rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); |
4874 | rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); |
4875 | rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); |
4876 | rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); |
4877 | } |
4878 | |
4879 | static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, |
4880 | struct bnx2x_rxq_setup_params *params, |
4881 | struct client_init_rx_data *rx_data, |
4882 | unsigned long *flags) |
4883 | { |
4884 | rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * |
4885 | CLIENT_INIT_RX_DATA_TPA_EN_IPV4; |
4886 | rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) * |
4887 | CLIENT_INIT_RX_DATA_TPA_MODE; |
4888 | rx_data->vmqueue_mode_en_flg = 0; |
4889 | |
4890 | rx_data->cache_line_alignment_log_size = |
4891 | params->cache_line_log; |
4892 | rx_data->enable_dynamic_hc = |
4893 | test_bit(BNX2X_Q_FLG_DHC, flags); |
4894 | rx_data->max_sges_for_packet = params->max_sges_pkt; |
4895 | rx_data->client_qzone_id = params->cl_qzone_id; |
4896 | rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); |
4897 | |
4898 | /* Always start in DROP_ALL mode */ |
4899 | rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | |
4900 | CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); |
4901 | |
4902 | /* We don't set drop flags */ |
4903 | rx_data->drop_ip_cs_err_flg = 0; |
4904 | rx_data->drop_tcp_cs_err_flg = 0; |
4905 | rx_data->drop_ttl0_flg = 0; |
4906 | rx_data->drop_udp_cs_err_flg = 0; |
4907 | rx_data->inner_vlan_removal_enable_flg = |
4908 | test_bit(BNX2X_Q_FLG_VLAN, flags); |
4909 | rx_data->outer_vlan_removal_enable_flg = |
4910 | test_bit(BNX2X_Q_FLG_OV, flags); |
4911 | rx_data->status_block_id = params->fw_sb_id; |
4912 | rx_data->rx_sb_index_number = params->sb_cq_index; |
4913 | rx_data->max_tpa_queues = params->max_tpa_queues; |
4914 | rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); |
4915 | rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); |
4916 | rx_data->bd_page_base.lo = |
4917 | cpu_to_le32(U64_LO(params->dscr_map)); |
4918 | rx_data->bd_page_base.hi = |
4919 | cpu_to_le32(U64_HI(params->dscr_map)); |
4920 | rx_data->sge_page_base.lo = |
4921 | cpu_to_le32(U64_LO(params->sge_map)); |
4922 | rx_data->sge_page_base.hi = |
4923 | cpu_to_le32(U64_HI(params->sge_map)); |
4924 | rx_data->cqe_page_base.lo = |
4925 | cpu_to_le32(U64_LO(params->rcq_map)); |
4926 | rx_data->cqe_page_base.hi = |
4927 | cpu_to_le32(U64_HI(params->rcq_map)); |
4928 | rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); |
4929 | |
4930 | if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { |
4931 | rx_data->approx_mcast_engine_id = params->mcast_engine_id; |
4932 | rx_data->is_approx_mcast = 1; |
4933 | } |
4934 | |
4935 | rx_data->rss_engine_id = params->rss_engine_id; |
4936 | |
4937 | /* silent vlan removal */ |
4938 | rx_data->silent_vlan_removal_flg = |
4939 | test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); |
4940 | rx_data->silent_vlan_value = |
4941 | cpu_to_le16(params->silent_removal_value); |
4942 | rx_data->silent_vlan_mask = |
4943 | cpu_to_le16(params->silent_removal_mask); |
4944 | } |
4945 | |
4946 | /* initialize the general, tx and rx parts of a queue object */ |
4947 | static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, |
4948 | struct bnx2x_queue_state_params *cmd_params, |
4949 | struct client_init_ramrod_data *data) |
4950 | { |
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.setup.gen_params,
				       &data->general,
				       &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.txq_params,
				  &data->tx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.rxq_params,
				  &data->rx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
				     &cmd_params->params.setup.pause_params,
				     &data->rx);
4969 | } |
4970 | |
4971 | /* initialize the general and tx parts of a tx-only queue object */ |
4972 | static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, |
4973 | struct bnx2x_queue_state_params *cmd_params, |
4974 | struct tx_queue_init_ramrod_data *data) |
4975 | { |
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
				       &cmd_params->params.tx_only.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.tx_only.txq_params,
				  &data->tx,
				  &cmd_params->params.tx_only.flags);

	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4987 | cmd_params->q_obj->cids[0], |
4988 | data->tx.tx_bd_page_base.lo, |
4989 | data->tx.tx_bd_page_base.hi); |
4990 | } |
4991 | |
4992 | /** |
4993 | * bnx2x_q_init - init HW/FW queue |
4994 | * |
4995 | * @bp: device handle |
 * @params:	queue state parameters
4997 | * |
4998 | * HW/FW initial Queue configuration: |
4999 | * - HC: Rx and Tx |
5000 | * - CDU context validation |
5001 | * |
5002 | */ |
5003 | static inline int bnx2x_q_init(struct bnx2x *bp, |
5004 | struct bnx2x_queue_state_params *params) |
5005 | { |
5006 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5007 | struct bnx2x_queue_init_params *init = ¶ms->params.init; |
5008 | u16 hc_usec; |
5009 | u8 cos; |
5010 | |
5011 | /* Tx HC configuration */ |
5012 | if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && |
5013 | test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { |
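		/* hc_rate is given in interrupts per second; convert it to a
		 * status-block coalescing timeout in microseconds (a rate of
		 * 0 leaves the timeout at 0).
		 */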
5014 | hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; |
5015 | |
		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
5020 | } |
5021 | |
5022 | /* Rx HC configuration */ |
5023 | if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && |
5024 | test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { |
5025 | hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; |
5026 | |
		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
5031 | } |
5032 | |
5033 | /* Set CDU context validation values */ |
5034 | for (cos = 0; cos < o->max_cos; cos++) { |
5035 | DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n" , |
5036 | o->cids[cos], cos); |
5037 | DP(BNX2X_MSG_SP, "context pointer %p\n" , init->cxts[cos]); |
5038 | bnx2x_set_ctx_validation(bp, cxt: init->cxts[cos], cid: o->cids[cos]); |
5039 | } |
5040 | |
5041 | /* As no ramrod is sent, complete the command immediately */ |
5042 | o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); |
5043 | |
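	/* Make sure the INIT completion performed above (state and pending
	 * bit updates) is visible to other CPUs before we return.
	 */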
5044 | smp_mb(); |
5045 | |
5046 | return 0; |
5047 | } |
5048 | |
5049 | static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, |
5050 | struct bnx2x_queue_state_params *params) |
5051 | { |
5052 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5053 | struct client_init_ramrod_data *rdata = |
5054 | (struct client_init_ramrod_data *)o->rdata; |
5055 | dma_addr_t data_mapping = o->rdata_mapping; |
5056 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; |
5057 | |
5058 | /* Clear the ramrod data */ |
5059 | memset(rdata, 0, sizeof(*rdata)); |
5060 | |
5061 | /* Fill the ramrod data */ |
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
5063 | |
5064 | /* No need for an explicit memory barrier here as long as we |
5065 | * ensure the ordering of writing to the SPQ element |
5066 | * and updating of the SPQ producer which involves a memory |
5067 | * read. If the memory read is removed we will have to put a |
5068 | * full memory barrier there (inside bnx2x_sp_post()). |
5069 | */ |
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5073 | } |
5074 | |
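/* Same as bnx2x_q_send_setup_e1x() but for E2 and newer chips, which take
 * additional setup data (see bnx2x_q_fill_setup_data_e2()) on top of the
 * common part.
 */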
5075 | static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, |
5076 | struct bnx2x_queue_state_params *params) |
5077 | { |
5078 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5079 | struct client_init_ramrod_data *rdata = |
5080 | (struct client_init_ramrod_data *)o->rdata; |
5081 | dma_addr_t data_mapping = o->rdata_mapping; |
5082 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; |
5083 | |
5084 | /* Clear the ramrod data */ |
5085 | memset(rdata, 0, sizeof(*rdata)); |
5086 | |
5087 | /* Fill the ramrod data */ |
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
5090 | |
5091 | /* No need for an explicit memory barrier here as long as we |
5092 | * ensure the ordering of writing to the SPQ element |
5093 | * and updating of the SPQ producer which involves a memory |
5094 | * read. If the memory read is removed we will have to put a |
5095 | * full memory barrier there (inside bnx2x_sp_post()). |
5096 | */ |
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5100 | } |
5101 | |
5102 | static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, |
5103 | struct bnx2x_queue_state_params *params) |
5104 | { |
5105 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5106 | struct tx_queue_init_ramrod_data *rdata = |
5107 | (struct tx_queue_init_ramrod_data *)o->rdata; |
5108 | dma_addr_t data_mapping = o->rdata_mapping; |
5109 | int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; |
5110 | struct bnx2x_queue_setup_tx_only_params *tx_only_params = |
5111 | ¶ms->params.tx_only; |
5112 | u8 cid_index = tx_only_params->cid_index; |
5113 | |
5114 | if (cid_index >= o->max_cos) { |
5115 | BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n" , |
5116 | o->cl_id, cid_index); |
5117 | return -EINVAL; |
5118 | } |
5119 | |
5120 | DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n" , |
5121 | tx_only_params->gen_params.cos, |
5122 | tx_only_params->gen_params.spcl_id); |
5123 | |
5124 | /* Clear the ramrod data */ |
5125 | memset(rdata, 0, sizeof(*rdata)); |
5126 | |
5127 | /* Fill the ramrod data */ |
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
5131 | o->cids[cid_index], rdata->general.client_id, |
5132 | rdata->general.sp_client_id, rdata->general.cos); |
5133 | |
5134 | /* No need for an explicit memory barrier here as long as we |
5135 | * ensure the ordering of writing to the SPQ element |
5136 | * and updating of the SPQ producer which involves a memory |
5137 | * read. If the memory read is removed we will have to put a |
5138 | * full memory barrier there (inside bnx2x_sp_post()). |
5139 | */ |
	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5143 | } |
5144 | |
5145 | static void bnx2x_q_fill_update_data(struct bnx2x *bp, |
5146 | struct bnx2x_queue_sp_obj *obj, |
5147 | struct bnx2x_queue_update_params *params, |
5148 | struct client_update_ramrod_data *data) |
5149 | { |
5150 | /* Client ID of the client to update */ |
5151 | data->client_id = obj->cl_id; |
5152 | |
5153 | /* Function ID of the client to update */ |
5154 | data->func_id = obj->func_id; |
5155 | |
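	/* Each attribute below is sent as a value together with a matching
	 * *_change_flg; the firmware applies a value only when its change
	 * flag is set, so unrelated settings remain untouched.
	 */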
5156 | /* Default VLAN value */ |
5157 | data->default_vlan = cpu_to_le16(params->def_vlan); |
5158 | |
5159 | /* Inner VLAN stripping */ |
5160 | data->inner_vlan_removal_enable_flg = |
5161 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); |
5162 | data->inner_vlan_removal_change_flg = |
5163 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, |
5164 | ¶ms->update_flags); |
5165 | |
5166 | /* Outer VLAN stripping */ |
5167 | data->outer_vlan_removal_enable_flg = |
5168 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); |
5169 | data->outer_vlan_removal_change_flg = |
5170 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, |
5171 | ¶ms->update_flags); |
5172 | |
5173 | /* Drop packets that have source MAC that doesn't belong to this |
5174 | * Queue. |
5175 | */ |
5176 | data->anti_spoofing_enable_flg = |
5177 | test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); |
5178 | data->anti_spoofing_change_flg = |
5179 | test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags); |
5180 | |
5181 | /* Activate/Deactivate */ |
5182 | data->activate_flg = |
5183 | test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags); |
5184 | data->activate_change_flg = |
5185 | test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); |
5186 | |
5187 | /* Enable default VLAN */ |
5188 | data->default_vlan_enable_flg = |
5189 | test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); |
5190 | data->default_vlan_change_flg = |
5191 | test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, |
5192 | ¶ms->update_flags); |
5193 | |
5194 | /* silent vlan removal */ |
5195 | data->silent_vlan_change_flg = |
5196 | test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, |
5197 | ¶ms->update_flags); |
5198 | data->silent_vlan_removal_flg = |
5199 | test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags); |
5200 | data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); |
5201 | data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); |
5202 | |
5203 | /* tx switching */ |
5204 | data->tx_switching_flg = |
5205 | test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, ¶ms->update_flags); |
5206 | data->tx_switching_change_flg = |
5207 | test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, |
5208 | ¶ms->update_flags); |
5209 | |
5210 | /* PTP */ |
5211 | data->handle_ptp_pkts_flg = |
5212 | test_bit(BNX2X_Q_UPDATE_PTP_PKTS, ¶ms->update_flags); |
5213 | data->handle_ptp_pkts_change_flg = |
5214 | test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, ¶ms->update_flags); |
5215 | } |
5216 | |
5217 | static inline int bnx2x_q_send_update(struct bnx2x *bp, |
5218 | struct bnx2x_queue_state_params *params) |
5219 | { |
5220 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5221 | struct client_update_ramrod_data *rdata = |
5222 | (struct client_update_ramrod_data *)o->rdata; |
5223 | dma_addr_t data_mapping = o->rdata_mapping; |
5224 | struct bnx2x_queue_update_params *update_params = |
5225 | ¶ms->params.update; |
5226 | u8 cid_index = update_params->cid_index; |
5227 | |
5228 | if (cid_index >= o->max_cos) { |
5229 | BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n" , |
5230 | o->cl_id, cid_index); |
5231 | return -EINVAL; |
5232 | } |
5233 | |
5234 | /* Clear the ramrod data */ |
5235 | memset(rdata, 0, sizeof(*rdata)); |
5236 | |
5237 | /* Fill the ramrod data */ |
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
5239 | |
5240 | /* No need for an explicit memory barrier here as long as we |
5241 | * ensure the ordering of writing to the SPQ element |
5242 | * and updating of the SPQ producer which involves a memory |
5243 | * read. If the memory read is removed we will have to put a |
5244 | * full memory barrier there (inside bnx2x_sp_post()). |
5245 | */ |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5249 | } |
5250 | |
5251 | /** |
5252 | * bnx2x_q_send_deactivate - send DEACTIVATE command |
5253 | * |
5254 | * @bp: device handle |
 * @params:	queue state parameters
5256 | * |
5257 | * implemented using the UPDATE command. |
5258 | */ |
5259 | static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, |
5260 | struct bnx2x_queue_state_params *params) |
5261 | { |
5262 | struct bnx2x_queue_update_params *update = ¶ms->params.update; |
5263 | |
5264 | memset(update, 0, sizeof(*update)); |
5265 | |
5266 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); |
5267 | |
5268 | return bnx2x_q_send_update(bp, params); |
5269 | } |
5270 | |
5271 | /** |
5272 | * bnx2x_q_send_activate - send ACTIVATE command |
5273 | * |
5274 | * @bp: device handle |
 * @params:	queue state parameters
5276 | * |
5277 | * implemented using the UPDATE command. |
5278 | */ |
5279 | static inline int bnx2x_q_send_activate(struct bnx2x *bp, |
5280 | struct bnx2x_queue_state_params *params) |
5281 | { |
5282 | struct bnx2x_queue_update_params *update = ¶ms->params.update; |
5283 | |
5284 | memset(update, 0, sizeof(*update)); |
5285 | |
5286 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); |
5287 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); |
5288 | |
5289 | return bnx2x_q_send_update(bp, params); |
5290 | } |
5291 | |
5292 | static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, |
5293 | struct bnx2x_queue_sp_obj *obj, |
5294 | struct bnx2x_queue_update_tpa_params *params, |
5295 | struct tpa_update_ramrod_data *data) |
5296 | { |
5297 | data->client_id = obj->cl_id; |
5298 | data->complete_on_both_clients = params->complete_on_both_clients; |
5299 | data->dont_verify_rings_pause_thr_flg = |
5300 | params->dont_verify_thr; |
5301 | data->max_agg_size = cpu_to_le16(params->max_agg_sz); |
5302 | data->max_sges_for_packet = params->max_sges_pkt; |
5303 | data->max_tpa_queues = params->max_tpa_queues; |
5304 | data->sge_buff_size = cpu_to_le16(params->sge_buff_sz); |
5305 | data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map)); |
5306 | data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map)); |
5307 | data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high); |
5308 | data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low); |
5309 | data->tpa_mode = params->tpa_mode; |
5310 | data->update_ipv4 = params->update_ipv4; |
5311 | data->update_ipv6 = params->update_ipv6; |
5312 | } |
5313 | |
5314 | static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, |
5315 | struct bnx2x_queue_state_params *params) |
5316 | { |
5317 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5318 | struct tpa_update_ramrod_data *rdata = |
5319 | (struct tpa_update_ramrod_data *)o->rdata; |
5320 | dma_addr_t data_mapping = o->rdata_mapping; |
5321 | struct bnx2x_queue_update_tpa_params *update_tpa_params = |
5322 | ¶ms->params.update_tpa; |
5323 | u16 type; |
5324 | |
5325 | /* Clear the ramrod data */ |
5326 | memset(rdata, 0, sizeof(*rdata)); |
5327 | |
5328 | /* Fill the ramrod data */ |
	bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
5330 | |
5331 | /* Add the function id inside the type, so that sp post function |
5332 | * doesn't automatically add the PF func-id, this is required |
5333 | * for operations done by PFs on behalf of their VFs |
5334 | */ |
5335 | type = ETH_CONNECTION_TYPE | |
5336 | ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT); |
5337 | |
5338 | /* No need for an explicit memory barrier here as long as we |
5339 | * ensure the ordering of writing to the SPQ element |
5340 | * and updating of the SPQ producer which involves a memory |
5341 | * read. If the memory read is removed we will have to put a |
5342 | * full memory barrier there (inside bnx2x_sp_post()). |
5343 | */ |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
			     o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), type);
5348 | } |
5349 | |
5350 | static inline int bnx2x_q_send_halt(struct bnx2x *bp, |
5351 | struct bnx2x_queue_state_params *params) |
5352 | { |
5353 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5354 | |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
5358 | } |
5359 | |
5360 | static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, |
5361 | struct bnx2x_queue_state_params *params) |
5362 | { |
5363 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5364 | u8 cid_idx = params->params.cfc_del.cid_index; |
5365 | |
5366 | if (cid_idx >= o->max_cos) { |
5367 | BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n" , |
5368 | o->cl_id, cid_idx); |
5369 | return -EINVAL; |
5370 | } |
5371 | |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
5374 | } |
5375 | |
5376 | static inline int bnx2x_q_send_terminate(struct bnx2x *bp, |
5377 | struct bnx2x_queue_state_params *params) |
5378 | { |
5379 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5380 | u8 cid_index = params->params.terminate.cid_index; |
5381 | |
5382 | if (cid_index >= o->max_cos) { |
5383 | BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n" , |
5384 | o->cl_id, cid_index); |
5385 | return -EINVAL; |
5386 | } |
5387 | |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
5390 | } |
5391 | |
5392 | static inline int bnx2x_q_send_empty(struct bnx2x *bp, |
5393 | struct bnx2x_queue_state_params *params) |
5394 | { |
5395 | struct bnx2x_queue_sp_obj *o = params->q_obj; |
5396 | |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
5400 | } |
5401 | |
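/* Dispatcher for the commands whose handling is identical on all chip
 * generations; the per-chip send_cmd callbacks below fall back to it for
 * everything except SETUP.
 */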
5402 | static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, |
5403 | struct bnx2x_queue_state_params *params) |
5404 | { |
5405 | switch (params->cmd) { |
5406 | case BNX2X_Q_CMD_INIT: |
5407 | return bnx2x_q_init(bp, params); |
5408 | case BNX2X_Q_CMD_SETUP_TX_ONLY: |
5409 | return bnx2x_q_send_setup_tx_only(bp, params); |
5410 | case BNX2X_Q_CMD_DEACTIVATE: |
5411 | return bnx2x_q_send_deactivate(bp, params); |
5412 | case BNX2X_Q_CMD_ACTIVATE: |
5413 | return bnx2x_q_send_activate(bp, params); |
5414 | case BNX2X_Q_CMD_UPDATE: |
5415 | return bnx2x_q_send_update(bp, params); |
5416 | case BNX2X_Q_CMD_UPDATE_TPA: |
5417 | return bnx2x_q_send_update_tpa(bp, params); |
5418 | case BNX2X_Q_CMD_HALT: |
5419 | return bnx2x_q_send_halt(bp, params); |
5420 | case BNX2X_Q_CMD_CFC_DEL: |
5421 | return bnx2x_q_send_cfc_del(bp, params); |
5422 | case BNX2X_Q_CMD_TERMINATE: |
5423 | return bnx2x_q_send_terminate(bp, params); |
5424 | case BNX2X_Q_CMD_EMPTY: |
5425 | return bnx2x_q_send_empty(bp, params); |
5426 | default: |
5427 | BNX2X_ERR("Unknown command: %d\n" , params->cmd); |
5428 | return -EINVAL; |
5429 | } |
5430 | } |
5431 | |
5432 | static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, |
5433 | struct bnx2x_queue_state_params *params) |
5434 | { |
5435 | switch (params->cmd) { |
5436 | case BNX2X_Q_CMD_SETUP: |
5437 | return bnx2x_q_send_setup_e1x(bp, params); |
5438 | case BNX2X_Q_CMD_INIT: |
5439 | case BNX2X_Q_CMD_SETUP_TX_ONLY: |
5440 | case BNX2X_Q_CMD_DEACTIVATE: |
5441 | case BNX2X_Q_CMD_ACTIVATE: |
5442 | case BNX2X_Q_CMD_UPDATE: |
5443 | case BNX2X_Q_CMD_UPDATE_TPA: |
5444 | case BNX2X_Q_CMD_HALT: |
5445 | case BNX2X_Q_CMD_CFC_DEL: |
5446 | case BNX2X_Q_CMD_TERMINATE: |
5447 | case BNX2X_Q_CMD_EMPTY: |
5448 | return bnx2x_queue_send_cmd_cmn(bp, params); |
5449 | default: |
5450 | BNX2X_ERR("Unknown command: %d\n" , params->cmd); |
5451 | return -EINVAL; |
5452 | } |
5453 | } |
5454 | |
5455 | static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, |
5456 | struct bnx2x_queue_state_params *params) |
5457 | { |
5458 | switch (params->cmd) { |
5459 | case BNX2X_Q_CMD_SETUP: |
5460 | return bnx2x_q_send_setup_e2(bp, params); |
5461 | case BNX2X_Q_CMD_INIT: |
5462 | case BNX2X_Q_CMD_SETUP_TX_ONLY: |
5463 | case BNX2X_Q_CMD_DEACTIVATE: |
5464 | case BNX2X_Q_CMD_ACTIVATE: |
5465 | case BNX2X_Q_CMD_UPDATE: |
5466 | case BNX2X_Q_CMD_UPDATE_TPA: |
5467 | case BNX2X_Q_CMD_HALT: |
5468 | case BNX2X_Q_CMD_CFC_DEL: |
5469 | case BNX2X_Q_CMD_TERMINATE: |
5470 | case BNX2X_Q_CMD_EMPTY: |
5471 | return bnx2x_queue_send_cmd_cmn(bp, params); |
5472 | default: |
5473 | BNX2X_ERR("Unknown command: %d\n" , params->cmd); |
5474 | return -EINVAL; |
5475 | } |
5476 | } |
5477 | |
5478 | /** |
5479 | * bnx2x_queue_chk_transition - check state machine of a regular Queue |
5480 | * |
5481 | * @bp: device handle |
5482 | * @o: queue info |
5483 | * @params: queue state |
5484 | * |
5485 | * (not Forwarding) |
5486 | * It both checks if the requested command is legal in a current |
5487 | * state and, if it's legal, sets a `next_state' in the object |
5488 | * that will be used in the completion flow to set the `state' |
5489 | * of the object. |
5490 | * |
5491 | * returns 0 if a requested command is a legal transition, |
5492 | * -EINVAL otherwise. |
5493 | */ |
5494 | static int bnx2x_queue_chk_transition(struct bnx2x *bp, |
5495 | struct bnx2x_queue_sp_obj *o, |
5496 | struct bnx2x_queue_state_params *params) |
5497 | { |
5498 | enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; |
5499 | enum bnx2x_queue_cmd cmd = params->cmd; |
5500 | struct bnx2x_queue_update_params *update_params = |
5501 | ¶ms->params.update; |
5502 | u8 next_tx_only = o->num_tx_only; |
5503 | |
5504 | /* Forget all pending for completion commands if a driver only state |
5505 | * transition has been requested. |
5506 | */ |
5507 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { |
5508 | o->pending = 0; |
5509 | o->next_state = BNX2X_Q_STATE_MAX; |
5510 | } |
5511 | |
5512 | /* Don't allow a next state transition if we are in the middle of |
5513 | * the previous one. |
5514 | */ |
5515 | if (o->pending) { |
5516 | BNX2X_ERR("Blocking transition since pending was %lx\n" , |
5517 | o->pending); |
5518 | return -EBUSY; |
5519 | } |
5520 | |
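	/* Queue state machine, as encoded by the switch below:
	 *
	 *   RESET           --INIT-->           INITIALIZED
	 *   INITIALIZED     --SETUP-->          ACTIVE or INACTIVE (FLG_ACTIVE)
	 *   ACTIVE          --DEACTIVATE-->     INACTIVE
	 *   ACTIVE          --SETUP_TX_ONLY-->  MULTI_COS
	 *   ACTIVE/INACTIVE --HALT-->           STOPPED
	 *   MULTI_COS       --TERMINATE-->      MCOS_TERMINATED
	 *   MCOS_TERMINATED --CFC_DEL-->        ACTIVE or MULTI_COS
	 *   INACTIVE        --ACTIVATE-->       ACTIVE
	 *   STOPPED         --TERMINATE-->      TERMINATED
	 *   TERMINATED      --CFC_DEL-->        RESET
	 *
	 * EMPTY, UPDATE and UPDATE_TPA keep the current state (UPDATE may
	 * toggle ACTIVE/INACTIVE when an ACTIVATE change is requested).
	 */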
5521 | switch (state) { |
5522 | case BNX2X_Q_STATE_RESET: |
5523 | if (cmd == BNX2X_Q_CMD_INIT) |
5524 | next_state = BNX2X_Q_STATE_INITIALIZED; |
5525 | |
5526 | break; |
5527 | case BNX2X_Q_STATE_INITIALIZED: |
5528 | if (cmd == BNX2X_Q_CMD_SETUP) { |
5529 | if (test_bit(BNX2X_Q_FLG_ACTIVE, |
5530 | ¶ms->params.setup.flags)) |
5531 | next_state = BNX2X_Q_STATE_ACTIVE; |
5532 | else |
5533 | next_state = BNX2X_Q_STATE_INACTIVE; |
5534 | } |
5535 | |
5536 | break; |
5537 | case BNX2X_Q_STATE_ACTIVE: |
5538 | if (cmd == BNX2X_Q_CMD_DEACTIVATE) |
5539 | next_state = BNX2X_Q_STATE_INACTIVE; |
5540 | |
5541 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || |
5542 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) |
5543 | next_state = BNX2X_Q_STATE_ACTIVE; |
5544 | |
5545 | else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { |
5546 | next_state = BNX2X_Q_STATE_MULTI_COS; |
5547 | next_tx_only = 1; |
5548 | } |
5549 | |
5550 | else if (cmd == BNX2X_Q_CMD_HALT) |
5551 | next_state = BNX2X_Q_STATE_STOPPED; |
5552 | |
5553 | else if (cmd == BNX2X_Q_CMD_UPDATE) { |
5554 | /* If "active" state change is requested, update the |
5555 | * state accordingly. |
5556 | */ |
5557 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, |
5558 | &update_params->update_flags) && |
5559 | !test_bit(BNX2X_Q_UPDATE_ACTIVATE, |
5560 | &update_params->update_flags)) |
5561 | next_state = BNX2X_Q_STATE_INACTIVE; |
5562 | else |
5563 | next_state = BNX2X_Q_STATE_ACTIVE; |
5564 | } |
5565 | |
5566 | break; |
5567 | case BNX2X_Q_STATE_MULTI_COS: |
5568 | if (cmd == BNX2X_Q_CMD_TERMINATE) |
5569 | next_state = BNX2X_Q_STATE_MCOS_TERMINATED; |
5570 | |
5571 | else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { |
5572 | next_state = BNX2X_Q_STATE_MULTI_COS; |
5573 | next_tx_only = o->num_tx_only + 1; |
5574 | } |
5575 | |
5576 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || |
5577 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) |
5578 | next_state = BNX2X_Q_STATE_MULTI_COS; |
5579 | |
5580 | else if (cmd == BNX2X_Q_CMD_UPDATE) { |
5581 | /* If "active" state change is requested, update the |
5582 | * state accordingly. |
5583 | */ |
5584 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, |
5585 | &update_params->update_flags) && |
5586 | !test_bit(BNX2X_Q_UPDATE_ACTIVATE, |
5587 | &update_params->update_flags)) |
5588 | next_state = BNX2X_Q_STATE_INACTIVE; |
5589 | else |
5590 | next_state = BNX2X_Q_STATE_MULTI_COS; |
5591 | } |
5592 | |
5593 | break; |
5594 | case BNX2X_Q_STATE_MCOS_TERMINATED: |
5595 | if (cmd == BNX2X_Q_CMD_CFC_DEL) { |
5596 | next_tx_only = o->num_tx_only - 1; |
5597 | if (next_tx_only == 0) |
5598 | next_state = BNX2X_Q_STATE_ACTIVE; |
5599 | else |
5600 | next_state = BNX2X_Q_STATE_MULTI_COS; |
5601 | } |
5602 | |
5603 | break; |
5604 | case BNX2X_Q_STATE_INACTIVE: |
5605 | if (cmd == BNX2X_Q_CMD_ACTIVATE) |
5606 | next_state = BNX2X_Q_STATE_ACTIVE; |
5607 | |
5608 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || |
5609 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) |
5610 | next_state = BNX2X_Q_STATE_INACTIVE; |
5611 | |
5612 | else if (cmd == BNX2X_Q_CMD_HALT) |
5613 | next_state = BNX2X_Q_STATE_STOPPED; |
5614 | |
5615 | else if (cmd == BNX2X_Q_CMD_UPDATE) { |
5616 | /* If "active" state change is requested, update the |
5617 | * state accordingly. |
5618 | */ |
5619 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, |
5620 | &update_params->update_flags) && |
5621 | test_bit(BNX2X_Q_UPDATE_ACTIVATE, |
5622 | &update_params->update_flags)){ |
5623 | if (o->num_tx_only == 0) |
5624 | next_state = BNX2X_Q_STATE_ACTIVE; |
5625 | else /* tx only queues exist for this queue */ |
5626 | next_state = BNX2X_Q_STATE_MULTI_COS; |
5627 | } else |
5628 | next_state = BNX2X_Q_STATE_INACTIVE; |
5629 | } |
5630 | |
5631 | break; |
5632 | case BNX2X_Q_STATE_STOPPED: |
5633 | if (cmd == BNX2X_Q_CMD_TERMINATE) |
5634 | next_state = BNX2X_Q_STATE_TERMINATED; |
5635 | |
5636 | break; |
5637 | case BNX2X_Q_STATE_TERMINATED: |
5638 | if (cmd == BNX2X_Q_CMD_CFC_DEL) |
5639 | next_state = BNX2X_Q_STATE_RESET; |
5640 | |
5641 | break; |
5642 | default: |
5643 | BNX2X_ERR("Illegal state: %d\n" , state); |
5644 | } |
5645 | |
5646 | /* Transition is assured */ |
5647 | if (next_state != BNX2X_Q_STATE_MAX) { |
5648 | DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n" , |
5649 | state, cmd, next_state); |
5650 | o->next_state = next_state; |
5651 | o->next_tx_only = next_tx_only; |
5652 | return 0; |
5653 | } |
5654 | |
5655 | DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n" , state, cmd); |
5656 | |
5657 | return -EINVAL; |
5658 | } |
5659 | |
5660 | void bnx2x_init_queue_obj(struct bnx2x *bp, |
5661 | struct bnx2x_queue_sp_obj *obj, |
5662 | u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, |
5663 | void *rdata, |
5664 | dma_addr_t rdata_mapping, unsigned long type) |
5665 | { |
5666 | memset(obj, 0, sizeof(*obj)); |
5667 | |
5668 | /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */ |
5669 | BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt); |
5670 | |
5671 | memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); |
5672 | obj->max_cos = cid_cnt; |
5673 | obj->cl_id = cl_id; |
5674 | obj->func_id = func_id; |
5675 | obj->rdata = rdata; |
5676 | obj->rdata_mapping = rdata_mapping; |
5677 | obj->type = type; |
5678 | obj->next_state = BNX2X_Q_STATE_MAX; |
5679 | |
5680 | if (CHIP_IS_E1x(bp)) |
5681 | obj->send_cmd = bnx2x_queue_send_cmd_e1x; |
5682 | else |
5683 | obj->send_cmd = bnx2x_queue_send_cmd_e2; |
5684 | |
5685 | obj->check_transition = bnx2x_queue_chk_transition; |
5686 | |
5687 | obj->complete_cmd = bnx2x_queue_comp_cmd; |
5688 | obj->wait_comp = bnx2x_queue_wait_comp; |
5689 | obj->set_pending = bnx2x_queue_set_pending; |
5690 | } |
5691 | |
/* return a queue object's logical state */
5693 | int bnx2x_get_q_logical_state(struct bnx2x *bp, |
5694 | struct bnx2x_queue_sp_obj *obj) |
5695 | { |
5696 | switch (obj->state) { |
5697 | case BNX2X_Q_STATE_ACTIVE: |
5698 | case BNX2X_Q_STATE_MULTI_COS: |
5699 | return BNX2X_Q_LOGICAL_STATE_ACTIVE; |
5700 | case BNX2X_Q_STATE_RESET: |
5701 | case BNX2X_Q_STATE_INITIALIZED: |
5702 | case BNX2X_Q_STATE_MCOS_TERMINATED: |
5703 | case BNX2X_Q_STATE_INACTIVE: |
5704 | case BNX2X_Q_STATE_STOPPED: |
5705 | case BNX2X_Q_STATE_TERMINATED: |
5706 | case BNX2X_Q_STATE_FLRED: |
5707 | return BNX2X_Q_LOGICAL_STATE_STOPPED; |
5708 | default: |
5709 | return -EINVAL; |
5710 | } |
5711 | } |
5712 | |
5713 | /********************** Function state object *********************************/ |
5714 | enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, |
5715 | struct bnx2x_func_sp_obj *o) |
5716 | { |
5717 | /* in the middle of transaction - return INVALID state */ |
5718 | if (o->pending) |
5719 | return BNX2X_F_STATE_MAX; |
5720 | |
	/* ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first
	 */
5724 | rmb(); |
5725 | |
5726 | return o->state; |
5727 | } |
5728 | |
5729 | static int bnx2x_func_wait_comp(struct bnx2x *bp, |
5730 | struct bnx2x_func_sp_obj *o, |
5731 | enum bnx2x_func_cmd cmd) |
5732 | { |
	return bnx2x_state_wait(bp, cmd, &o->pending);
5734 | } |
5735 | |
5736 | /** |
5737 | * bnx2x_func_state_change_comp - complete the state machine transition |
5738 | * |
5739 | * @bp: device handle |
5740 | * @o: function info |
5741 | * @cmd: more info |
5742 | * |
5743 | * Called on state change transition. Completes the state |
5744 | * machine transition only - no HW interaction. |
5745 | */ |
5746 | static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, |
5747 | struct bnx2x_func_sp_obj *o, |
5748 | enum bnx2x_func_cmd cmd) |
5749 | { |
5750 | unsigned long cur_pending = o->pending; |
5751 | |
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5754 | cmd, BP_FUNC(bp), o->state, |
5755 | cur_pending, o->next_state); |
5756 | return -EINVAL; |
5757 | } |
5758 | |
5759 | DP(BNX2X_MSG_SP, |
5760 | "Completing command %d for func %d, setting state to %d\n" , |
5761 | cmd, BP_FUNC(bp), o->next_state); |
5762 | |
5763 | o->state = o->next_state; |
5764 | o->next_state = BNX2X_F_STATE_MAX; |
5765 | |
5766 | /* It's important that o->state and o->next_state are |
5767 | * updated before o->pending. |
5768 | */ |
5769 | wmb(); |
5770 | |
	clear_bit(cmd, &o->pending);
5772 | smp_mb__after_atomic(); |
5773 | |
5774 | return 0; |
5775 | } |
5776 | |
5777 | /** |
5778 | * bnx2x_func_comp_cmd - complete the state change command |
5779 | * |
5780 | * @bp: device handle |
5781 | * @o: function info |
5782 | * @cmd: more info |
5783 | * |
5784 | * Checks that the arrived completion is expected. |
5785 | */ |
5786 | static int bnx2x_func_comp_cmd(struct bnx2x *bp, |
5787 | struct bnx2x_func_sp_obj *o, |
5788 | enum bnx2x_func_cmd cmd) |
5789 | { |
5790 | /* Complete the state machine part first, check if it's a |
5791 | * legal completion. |
5792 | */ |
5793 | int rc = bnx2x_func_state_change_comp(bp, o, cmd); |
5794 | return rc; |
5795 | } |
5796 | |
5797 | /** |
5798 | * bnx2x_func_chk_transition - perform function state machine transition |
5799 | * |
5800 | * @bp: device handle |
5801 | * @o: function info |
5802 | * @params: state parameters |
5803 | * |
5804 | * It both checks if the requested command is legal in a current |
5805 | * state and, if it's legal, sets a `next_state' in the object |
5806 | * that will be used in the completion flow to set the `state' |
5807 | * of the object. |
5808 | * |
5809 | * returns 0 if a requested command is a legal transition, |
5810 | * -EINVAL otherwise. |
5811 | */ |
5812 | static int bnx2x_func_chk_transition(struct bnx2x *bp, |
5813 | struct bnx2x_func_sp_obj *o, |
5814 | struct bnx2x_func_state_params *params) |
5815 | { |
5816 | enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; |
5817 | enum bnx2x_func_cmd cmd = params->cmd; |
5818 | |
5819 | /* Forget all pending for completion commands if a driver only state |
5820 | * transition has been requested. |
5821 | */ |
5822 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { |
5823 | o->pending = 0; |
5824 | o->next_state = BNX2X_F_STATE_MAX; |
5825 | } |
5826 | |
5827 | /* Don't allow a next state transition if we are in the middle of |
5828 | * the previous one. |
5829 | */ |
5830 | if (o->pending) |
5831 | return -EBUSY; |
5832 | |
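	/* Function state machine, as encoded by the switch below:
	 *
	 *   RESET       --HW_INIT-->   INITIALIZED
	 *   INITIALIZED --START-->     STARTED
	 *   INITIALIZED --HW_RESET-->  RESET
	 *   STARTED     --STOP-->      INITIALIZED
	 *   STARTED     --TX_STOP-->   TX_STOPPED
	 *   TX_STOPPED  --TX_START-->  STARTED
	 *
	 * AFEX_UPDATE and AFEX_VIFLISTS (in STARTED), and SWITCH_UPDATE and
	 * SET_TIMESYNC (in STARTED or TX_STOPPED), keep the current state
	 * and are refused while a STOP is pending.
	 */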
5833 | switch (state) { |
5834 | case BNX2X_F_STATE_RESET: |
5835 | if (cmd == BNX2X_F_CMD_HW_INIT) |
5836 | next_state = BNX2X_F_STATE_INITIALIZED; |
5837 | |
5838 | break; |
5839 | case BNX2X_F_STATE_INITIALIZED: |
5840 | if (cmd == BNX2X_F_CMD_START) |
5841 | next_state = BNX2X_F_STATE_STARTED; |
5842 | |
5843 | else if (cmd == BNX2X_F_CMD_HW_RESET) |
5844 | next_state = BNX2X_F_STATE_RESET; |
5845 | |
5846 | break; |
5847 | case BNX2X_F_STATE_STARTED: |
5848 | if (cmd == BNX2X_F_CMD_STOP) |
5849 | next_state = BNX2X_F_STATE_INITIALIZED; |
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion;
		 * for these events the next state remains STARTED.
		 */
5854 | else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) && |
5855 | (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) |
5856 | next_state = BNX2X_F_STATE_STARTED; |
5857 | |
5858 | else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && |
5859 | (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) |
5860 | next_state = BNX2X_F_STATE_STARTED; |
5861 | |
5862 | /* Switch_update ramrod can be sent in either started or |
5863 | * tx_stopped state, and it doesn't change the state. |
5864 | */ |
5865 | else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) && |
5866 | (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) |
5867 | next_state = BNX2X_F_STATE_STARTED; |
5868 | |
5869 | else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && |
5870 | (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) |
5871 | next_state = BNX2X_F_STATE_STARTED; |
5872 | |
5873 | else if (cmd == BNX2X_F_CMD_TX_STOP) |
5874 | next_state = BNX2X_F_STATE_TX_STOPPED; |
5875 | |
5876 | break; |
5877 | case BNX2X_F_STATE_TX_STOPPED: |
5878 | if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) && |
5879 | (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) |
5880 | next_state = BNX2X_F_STATE_TX_STOPPED; |
5881 | |
5882 | else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && |
5883 | (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) |
5884 | next_state = BNX2X_F_STATE_TX_STOPPED; |
5885 | |
5886 | else if (cmd == BNX2X_F_CMD_TX_START) |
5887 | next_state = BNX2X_F_STATE_STARTED; |
5888 | |
5889 | break; |
5890 | default: |
5891 | BNX2X_ERR("Unknown state: %d\n" , state); |
5892 | } |
5893 | |
5894 | /* Transition is assured */ |
5895 | if (next_state != BNX2X_F_STATE_MAX) { |
5896 | DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n" , |
5897 | state, cmd, next_state); |
5898 | o->next_state = next_state; |
5899 | return 0; |
5900 | } |
5901 | |
5902 | DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n" , |
5903 | state, cmd); |
5904 | |
5905 | return -EINVAL; |
5906 | } |
5907 | |
5908 | /** |
5909 | * bnx2x_func_init_func - performs HW init at function stage |
5910 | * |
5911 | * @bp: device handle |
 * @drv:	driver specific operations
5913 | * |
5914 | * Init HW when the current phase is |
5915 | * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only |
5916 | * HW blocks. |
5917 | */ |
5918 | static inline int bnx2x_func_init_func(struct bnx2x *bp, |
5919 | const struct bnx2x_func_sp_drv_ops *drv) |
5920 | { |
5921 | return drv->init_hw_func(bp); |
5922 | } |
5923 | |
5924 | /** |
5925 | * bnx2x_func_init_port - performs HW init at port stage |
5926 | * |
5927 | * @bp: device handle |
 * @drv:	driver specific operations
5929 | * |
5930 | * Init HW when the current phase is |
5931 | * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and |
5932 | * FUNCTION-only HW blocks. |
5933 | * |
5934 | */ |
5935 | static inline int bnx2x_func_init_port(struct bnx2x *bp, |
5936 | const struct bnx2x_func_sp_drv_ops *drv) |
5937 | { |
5938 | int rc = drv->init_hw_port(bp); |
5939 | if (rc) |
5940 | return rc; |
5941 | |
5942 | return bnx2x_func_init_func(bp, drv); |
5943 | } |
5944 | |
5945 | /** |
5946 | * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage |
5947 | * |
5948 | * @bp: device handle |
 * @drv:	driver specific operations
5950 | * |
5951 | * Init HW when the current phase is |
5952 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, |
5953 | * PORT-only and FUNCTION-only HW blocks. |
5954 | */ |
5955 | static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, |
5956 | const struct bnx2x_func_sp_drv_ops *drv) |
5957 | { |
5958 | int rc = drv->init_hw_cmn_chip(bp); |
5959 | if (rc) |
5960 | return rc; |
5961 | |
5962 | return bnx2x_func_init_port(bp, drv); |
5963 | } |
5964 | |
5965 | /** |
5966 | * bnx2x_func_init_cmn - performs HW init at common stage |
5967 | * |
5968 | * @bp: device handle |
 * @drv:	driver specific operations
5970 | * |
5971 | * Init HW when the current phase is |
5972 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, |
5973 | * PORT-only and FUNCTION-only HW blocks. |
5974 | */ |
5975 | static inline int bnx2x_func_init_cmn(struct bnx2x *bp, |
5976 | const struct bnx2x_func_sp_drv_ops *drv) |
5977 | { |
5978 | int rc = drv->init_hw_cmn(bp); |
5979 | if (rc) |
5980 | return rc; |
5981 | |
5982 | return bnx2x_func_init_port(bp, drv); |
5983 | } |
5984 | |
5985 | static int bnx2x_func_hw_init(struct bnx2x *bp, |
5986 | struct bnx2x_func_state_params *params) |
5987 | { |
5988 | u32 load_code = params->params.hw_init.load_phase; |
5989 | struct bnx2x_func_sp_obj *o = params->f_obj; |
5990 | const struct bnx2x_func_sp_drv_ops *drv = o->drv; |
5991 | int rc = 0; |
5992 | |
5993 | DP(BNX2X_MSG_SP, "function %d load_code %x\n" , |
5994 | BP_ABS_FUNC(bp), load_code); |
5995 | |
5996 | /* Prepare buffers for unzipping the FW */ |
5997 | rc = drv->gunzip_init(bp); |
5998 | if (rc) |
5999 | return rc; |
6000 | |
6001 | /* Prepare FW */ |
6002 | rc = drv->init_fw(bp); |
6003 | if (rc) { |
6004 | BNX2X_ERR("Error loading firmware\n" ); |
6005 | goto init_err; |
6006 | } |
6007 | |
	/* Handle the beginning of the COMMON_XXX phases separately... */
6009 | switch (load_code) { |
6010 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: |
6011 | rc = bnx2x_func_init_cmn_chip(bp, drv); |
6012 | if (rc) |
6013 | goto init_err; |
6014 | |
6015 | break; |
6016 | case FW_MSG_CODE_DRV_LOAD_COMMON: |
6017 | rc = bnx2x_func_init_cmn(bp, drv); |
6018 | if (rc) |
6019 | goto init_err; |
6020 | |
6021 | break; |
6022 | case FW_MSG_CODE_DRV_LOAD_PORT: |
6023 | rc = bnx2x_func_init_port(bp, drv); |
6024 | if (rc) |
6025 | goto init_err; |
6026 | |
6027 | break; |
6028 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: |
6029 | rc = bnx2x_func_init_func(bp, drv); |
6030 | if (rc) |
6031 | goto init_err; |
6032 | |
6033 | break; |
6034 | default: |
6035 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n" , load_code); |
6036 | rc = -EINVAL; |
6037 | } |
6038 | |
6039 | init_err: |
6040 | drv->gunzip_end(bp); |
6041 | |
6042 | /* In case of success, complete the command immediately: no ramrods |
6043 | * have been sent. |
6044 | */ |
6045 | if (!rc) |
6046 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); |
6047 | |
6048 | return rc; |
6049 | } |
6050 | |
6051 | /** |
6052 | * bnx2x_func_reset_func - reset HW at function stage |
6053 | * |
6054 | * @bp: device handle |
 * @drv:	driver specific operations
6056 | * |
6057 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only |
6058 | * FUNCTION-only HW blocks. |
6059 | */ |
6060 | static inline void bnx2x_func_reset_func(struct bnx2x *bp, |
6061 | const struct bnx2x_func_sp_drv_ops *drv) |
6062 | { |
6063 | drv->reset_hw_func(bp); |
6064 | } |
6065 | |
6066 | /** |
6067 | * bnx2x_func_reset_port - reset HW at port stage |
6068 | * |
6069 | * @bp: device handle |
 * @drv:	driver specific operations
6071 | * |
6072 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset |
6073 | * FUNCTION-only and PORT-only HW blocks. |
6074 | * |
6075 | * !!!IMPORTANT!!! |
6076 | * |
6077 | * It's important to call reset_port before reset_func() as the last thing |
6078 | * reset_func does is pf_disable() thus disabling PGLUE_B, which |
6079 | * makes impossible any DMAE transactions. |
6080 | */ |
6081 | static inline void bnx2x_func_reset_port(struct bnx2x *bp, |
6082 | const struct bnx2x_func_sp_drv_ops *drv) |
6083 | { |
6084 | drv->reset_hw_port(bp); |
6085 | bnx2x_func_reset_func(bp, drv); |
6086 | } |
6087 | |
6088 | /** |
6089 | * bnx2x_func_reset_cmn - reset HW at common stage |
6090 | * |
6091 | * @bp: device handle |
 * @drv:	driver specific operations
6093 | * |
6094 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and |
6095 | * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, |
6096 | * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. |
6097 | */ |
6098 | static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, |
6099 | const struct bnx2x_func_sp_drv_ops *drv) |
6100 | { |
6101 | bnx2x_func_reset_port(bp, drv); |
6102 | drv->reset_hw_cmn(bp); |
6103 | } |
6104 | |
6105 | static inline int bnx2x_func_hw_reset(struct bnx2x *bp, |
6106 | struct bnx2x_func_state_params *params) |
6107 | { |
6108 | u32 reset_phase = params->params.hw_reset.reset_phase; |
6109 | struct bnx2x_func_sp_obj *o = params->f_obj; |
6110 | const struct bnx2x_func_sp_drv_ops *drv = o->drv; |
6111 | |
6112 | DP(BNX2X_MSG_SP, "function %d reset_phase %x\n" , BP_ABS_FUNC(bp), |
6113 | reset_phase); |
6114 | |
6115 | switch (reset_phase) { |
6116 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: |
6117 | bnx2x_func_reset_cmn(bp, drv); |
6118 | break; |
6119 | case FW_MSG_CODE_DRV_UNLOAD_PORT: |
6120 | bnx2x_func_reset_port(bp, drv); |
6121 | break; |
6122 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: |
6123 | bnx2x_func_reset_func(bp, drv); |
6124 | break; |
6125 | default: |
6126 | BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n" , |
6127 | reset_phase); |
6128 | break; |
6129 | } |
6130 | |
6131 | /* Complete the command immediately: no ramrods have been sent. */ |
6132 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); |
6133 | |
6134 | return 0; |
6135 | } |
6136 | |
6137 | static inline int bnx2x_func_send_start(struct bnx2x *bp, |
6138 | struct bnx2x_func_state_params *params) |
6139 | { |
6140 | struct bnx2x_func_sp_obj *o = params->f_obj; |
6141 | struct function_start_data *rdata = |
6142 | (struct function_start_data *)o->rdata; |
6143 | dma_addr_t data_mapping = o->rdata_mapping; |
6144 | struct bnx2x_func_start_params *start_params = ¶ms->params.start; |
6145 | |
6146 | memset(rdata, 0, sizeof(*rdata)); |
6147 | |
6148 | /* Fill the ramrod data with provided parameters */ |
6149 | rdata->function_mode = (u8)start_params->mf_mode; |
6150 | rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); |
6151 | rdata->path_id = BP_PATH(bp); |
6152 | rdata->network_cos_mode = start_params->network_cos_mode; |
6153 | rdata->dmae_cmd_id = BNX2X_FW_DMAE_C; |
6154 | |
6155 | rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); |
6156 | rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); |
6157 | rdata->inner_clss_l2gre = start_params->inner_clss_l2gre; |
6158 | rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve; |
6159 | rdata->inner_clss_vxlan = start_params->inner_clss_vxlan; |
6160 | rdata->inner_rss = start_params->inner_rss; |
6161 | |
6162 | rdata->sd_accept_mf_clss_fail = start_params->class_fail; |
6163 | if (start_params->class_fail_ethtype) { |
6164 | rdata->sd_accept_mf_clss_fail_match_ethtype = 1; |
6165 | rdata->sd_accept_mf_clss_fail_ethtype = |
6166 | cpu_to_le16(start_params->class_fail_ethtype); |
6167 | } |
6168 | |
6169 | rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri; |
6170 | rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val; |
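	/* S-tag EtherType for SD multi-function mode: use the caller's value
	 * when provided, otherwise fall back to the standard 802.1Q TPID.
	 */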
6171 | if (start_params->sd_vlan_eth_type) |
6172 | rdata->sd_vlan_eth_type = |
6173 | cpu_to_le16(start_params->sd_vlan_eth_type); |
6174 | else |
6175 | rdata->sd_vlan_eth_type = |
6176 | cpu_to_le16(0x8100); |
6177 | |
6178 | rdata->no_added_tags = start_params->no_added_tags; |
6179 | |
6180 | rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid; |
6181 | if (rdata->c2s_pri_tt_valid) { |
6182 | memcpy(rdata->c2s_pri_trans_table.val, |
6183 | start_params->c2s_pri, |
6184 | MAX_VLAN_PRIORITIES); |
6185 | rdata->c2s_pri_default = start_params->c2s_pri_default; |
6186 | } |
	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
6193 | |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6197 | } |
6198 | |
6199 | static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, |
6200 | struct bnx2x_func_state_params *params) |
6201 | { |
6202 | struct bnx2x_func_sp_obj *o = params->f_obj; |
6203 | struct function_update_data *rdata = |
6204 | (struct function_update_data *)o->rdata; |
6205 | dma_addr_t data_mapping = o->rdata_mapping; |
6206 | struct bnx2x_func_switch_update_params *switch_update_params = |
6207 | ¶ms->params.switch_update; |
6208 | |
6209 | memset(rdata, 0, sizeof(*rdata)); |
6210 | |
6211 | /* Fill the ramrod data with provided parameters */ |
6212 | if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, |
6213 | &switch_update_params->changes)) { |
6214 | rdata->tx_switch_suspend_change_flg = 1; |
6215 | rdata->tx_switch_suspend = |
6216 | test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, |
6217 | &switch_update_params->changes); |
6218 | } |
6219 | |
6220 | if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, |
6221 | &switch_update_params->changes)) { |
6222 | rdata->sd_vlan_tag_change_flg = 1; |
6223 | rdata->sd_vlan_tag = |
6224 | cpu_to_le16(switch_update_params->vlan); |
6225 | } |
6226 | |
6227 | if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, |
6228 | &switch_update_params->changes)) { |
6229 | rdata->sd_vlan_eth_type_change_flg = 1; |
6230 | rdata->sd_vlan_eth_type = |
6231 | cpu_to_le16(switch_update_params->vlan_eth_type); |
6232 | } |
6233 | |
6234 | if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, |
6235 | &switch_update_params->changes)) { |
6236 | rdata->sd_vlan_force_pri_change_flg = 1; |
6237 | if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, |
6238 | &switch_update_params->changes)) |
6239 | rdata->sd_vlan_force_pri_flg = 1; |
6240 | rdata->sd_vlan_force_pri_flg = |
6241 | switch_update_params->vlan_force_prio; |
6242 | } |
6243 | |
6244 | if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, |
6245 | &switch_update_params->changes)) { |
6246 | rdata->update_tunn_cfg_flg = 1; |
6247 | if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, |
6248 | &switch_update_params->changes)) |
6249 | rdata->inner_clss_l2gre = 1; |
6250 | if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, |
6251 | &switch_update_params->changes)) |
6252 | rdata->inner_clss_vxlan = 1; |
6253 | if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, |
6254 | &switch_update_params->changes)) |
6255 | rdata->inner_clss_l2geneve = 1; |
6256 | if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, |
6257 | &switch_update_params->changes)) |
6258 | rdata->inner_rss = 1; |
6259 | rdata->vxlan_dst_port = |
6260 | cpu_to_le16(switch_update_params->vxlan_dst_port); |
6261 | rdata->geneve_dst_port = |
6262 | cpu_to_le16(switch_update_params->geneve_dst_port); |
6263 | } |
6264 | |
6265 | rdata->echo = SWITCH_UPDATE; |
6266 | |
6267 | /* No need for an explicit memory barrier here as long as we |
6268 | * ensure the ordering of writing to the SPQ element |
6269 | * and updating of the SPQ producer which involves a memory |
6270 | * read. If the memory read is removed we will have to put a |
6271 | * full memory barrier there (inside bnx2x_sp_post()). |
6272 | */ |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6276 | } |
6277 | |
6278 | static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, |
6279 | struct bnx2x_func_state_params *params) |
6280 | { |
6281 | struct bnx2x_func_sp_obj *o = params->f_obj; |
6282 | struct function_update_data *rdata = |
6283 | (struct function_update_data *)o->afex_rdata; |
6284 | dma_addr_t data_mapping = o->afex_rdata_mapping; |
6285 | struct bnx2x_func_afex_update_params *afex_update_params = |
6286 | ¶ms->params.afex_update; |
6287 | |
6288 | memset(rdata, 0, sizeof(*rdata)); |
6289 | |
6290 | /* Fill the ramrod data with provided parameters */ |
6291 | rdata->vif_id_change_flg = 1; |
6292 | rdata->vif_id = cpu_to_le16(afex_update_params->vif_id); |
6293 | rdata->afex_default_vlan_change_flg = 1; |
6294 | rdata->afex_default_vlan = |
6295 | cpu_to_le16(afex_update_params->afex_default_vlan); |
6296 | rdata->allowed_priorities_change_flg = 1; |
6297 | rdata->allowed_priorities = afex_update_params->allowed_priorities; |
6298 | rdata->echo = AFEX_UPDATE; |
6299 | |
6300 | /* No need for an explicit memory barrier here as long as we |
6301 | * ensure the ordering of writing to the SPQ element |
6302 | * and updating of the SPQ producer which involves a memory |
6303 | * read. If the memory read is removed we will have to put a |
6304 | * full memory barrier there (inside bnx2x_sp_post()). |
6305 | */ |
6306 | DP(BNX2X_MSG_SP, |
6307 | "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n" , |
6308 | rdata->vif_id, |
6309 | rdata->afex_default_vlan, rdata->allowed_priorities); |
6310 | |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6314 | } |
6315 | |
6316 | static |
6317 | inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, |
6318 | struct bnx2x_func_state_params *params) |
6319 | { |
6320 | struct bnx2x_func_sp_obj *o = params->f_obj; |
6321 | struct afex_vif_list_ramrod_data *rdata = |
6322 | (struct afex_vif_list_ramrod_data *)o->afex_rdata; |
6323 | struct bnx2x_func_afex_viflists_params *afex_vif_params = |
6324 | ¶ms->params.afex_viflists; |
6325 | u64 *p_rdata = (u64 *)rdata; |
6326 | |
6327 | memset(rdata, 0, sizeof(*rdata)); |
6328 | |
6329 | /* Fill the ramrod data with provided parameters */ |
6330 | rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index); |
6331 | rdata->func_bit_map = afex_vif_params->func_bit_map; |
6332 | rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; |
6333 | rdata->func_to_clear = afex_vif_params->func_to_clear; |
6334 | |
6335 | /* send in echo type of sub command */ |
6336 | rdata->echo = afex_vif_params->afex_vif_list_command; |
6337 | |
	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
6344 | |
6345 | DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n" , |
6346 | rdata->afex_vif_list_command, rdata->vif_list_index, |
6347 | rdata->func_bit_map, rdata->func_to_clear); |
6348 | |
6349 | /* this ramrod sends data directly and not through DMA mapping */ |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
6353 | } |
6354 | |
6355 | static inline int bnx2x_func_send_stop(struct bnx2x *bp, |
6356 | struct bnx2x_func_state_params *params) |
6357 | { |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
6360 | } |
6361 | |
6362 | static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, |
6363 | struct bnx2x_func_state_params *params) |
6364 | { |
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

6368 | static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, |
6369 | struct bnx2x_func_state_params *params) |
6370 | { |
6371 | struct bnx2x_func_sp_obj *o = params->f_obj; |
6372 | struct flow_control_configuration *rdata = |
6373 | (struct flow_control_configuration *)o->rdata; |
6374 | dma_addr_t data_mapping = o->rdata_mapping; |
6375 | struct bnx2x_func_tx_start_params *tx_start_params = |
6376 | ¶ms->params.tx_start; |
6377 | int i; |
6378 | |
6379 | memset(rdata, 0, sizeof(*rdata)); |
6380 | |
6381 | rdata->dcb_enabled = tx_start_params->dcb_enabled; |
6382 | rdata->dcb_version = tx_start_params->dcb_version; |
6383 | rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; |
6384 | |
6385 | for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) |
6386 | rdata->traffic_type_to_priority_cos[i] = |
6387 | tx_start_params->traffic_type_to_priority_cos[i]; |
6388 | |
6389 | for (i = 0; i < MAX_TRAFFIC_TYPES; i++) |
6390 | rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i]; |
6391 | /* No need for an explicit memory barrier here as long as we |
6392 | * ensure the ordering of writing to the SPQ element |
6393 | * and updating of the SPQ producer which involves a memory |
6394 | * read. If the memory read is removed we will have to put a |
6395 | * full memory barrier there (inside bnx2x_sp_post()). |
6396 | */ |
6397 | return bnx2x_sp_post(bp, command: RAMROD_CMD_ID_COMMON_START_TRAFFIC, cid: 0, |
6398 | U64_HI(data_mapping), |
6399 | U64_LO(data_mapping), cmd_type: NONE_CONNECTION_TYPE); |
6400 | } |
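
/* Illustrative caller sketch (not from this file): the START_TRAFFIC ramrod
 * is normally requested through the function state machine rather than by
 * calling bnx2x_func_send_tx_start() directly.  The DCB values below are
 * placeholders, and bp->func_obj is assumed to be the function SP object
 * embedded in struct bnx2x:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_tx_start_params *tx_params =
 *		&func_params.params.tx_start;
 *	int i;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_START;
 *
 *	tx_params->dcb_enabled = 1;
 *	tx_params->dcb_version = 0;
 *	for (i = 0; i < ARRAY_SIZE(tx_params->traffic_type_to_priority_cos); i++)
 *		tx_params->traffic_type_to_priority_cos[i] = 0;
 *
 *	return bnx2x_func_state_change(bp, &func_params);
 */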

static inline
int bnx2x_func_send_set_timesync(struct bnx2x *bp,
				 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct set_timesync_ramrod_data *rdata =
		(struct set_timesync_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&params->params.set_timesync;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
	rdata->offset_cmd = set_timesync_params->offset_cmd;
	rdata->add_sub_drift_adjust_value =
		set_timesync_params->add_sub_drift_adjust_value;
	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
	rdata->offset_delta.lo =
		cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
	rdata->offset_delta.hi =
		cpu_to_le32(U64_HI(set_timesync_params->offset_delta));

	DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
	   rdata->drift_adjust_cmd, rdata->offset_cmd,
	   rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
	   rdata->drift_adjust_period, rdata->offset_delta.lo,
	   rdata->offset_delta.hi);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_AFEX_UPDATE:
		return bnx2x_func_send_afex_update(bp, params);
	case BNX2X_F_CMD_AFEX_VIFLISTS:
		return bnx2x_func_send_afex_viflists(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	case BNX2X_F_CMD_SWITCH_UPDATE:
		return bnx2x_func_send_switch_update(bp, params);
	case BNX2X_F_CMD_SET_TIMESYNC:
		return bnx2x_func_send_set_timesync(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
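
/* Illustrative sketch (not from this file): how the function SP object is
 * typically wired up at init time.  The bnx2x_sp()/bnx2x_sp_mapping() slow-path
 * buffer helpers and the bnx2x_func_sp_drv ops table are assumed to exist
 * elsewhere in the driver; they are shown here only to make the parameter
 * roles concrete:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    bnx2x_sp(bp, func_afex_rdata),
 *			    bnx2x_sp_mapping(bp, func_afex_rdata),
 *			    &bnx2x_func_sp_drv);
 *
 * rdata/rdata_mapping (and the AFEX pair) must refer to the same DMA-coherent
 * buffer through its virtual and bus addresses, since the send_*() helpers
 * above fill the ramrod data through the virtual pointer and post the bus
 * address to the SPQ.
 */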

/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transaction
 *
 * Returns 0 in case of a successfully completed transition, a negative
 * error code in case of failure, or a positive (EBUSY) value if a
 * completion is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_atomic();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}

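/* Illustrative caller sketch (not from this file): requesting a function
 * state transition and waiting for its completion.  bp->func_obj is assumed
 * to be the object initialized by bnx2x_init_func_obj() above:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	int rc;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 *
 * With RAMROD_COMP_WAIT set, rc is 0 for a completed transition or a negative
 * error code on failure.  Without it, a positive return value means the
 * ramrod was posted and its completion is still pending.
 */
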