/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;
	unsigned long      created;
	unsigned int       queuelen;
	unsigned int       complete;
	unsigned int       send_only;
};

/* join state that allows creating mcg with sendonly member request */
#define SENDONLY_FULLMEMBER_JOIN	8

/*
 * This should be called with the priv->lock held
 */
static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
					       struct ipoib_mcast *mcast,
					       bool delay)
{
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return;

	/*
	 * We will be scheduling *something*, so cancel whatever is
	 * currently scheduled first
	 */
	cancel_delayed_work(&priv->mcast_task);
	if (mcast && delay) {
		/*
		 * We had a failure and want to schedule a retry later
		 */
		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
		mcast->delay_until = jiffies + (mcast->backoff * HZ);
		/*
		 * Mark this mcast for its delay, but restart the
		 * task immediately.  The join task will make sure to
		 * clear out all entries without delays, and then
		 * schedule itself to run again when the earliest
		 * delay expires
		 */
		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
	} else if (delay) {
		/*
		 * Special case of retrying after a failure to
		 * allocate the broadcast multicast group, wait
		 * 1 second and try again
		 */
		queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
	} else
		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}

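/*
 * Free a multicast group entry: remove the neighbours bound to this
 * group, drop the cached address handle, and free any packets still
 * waiting on the group's send queue (accounting them as tx_dropped).
 */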
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	int tx_dropped = 0;

	ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	/* remove all neigh connected to this mcast */
	ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

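/*
 * Allocate and initialize a multicast group entry.  Allocation is
 * GFP_ATOMIC because this can be called from the xmit path with
 * priv->lock held.
 */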
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->delay_until = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

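/*
 * Look up a multicast group by MGID in the device's rb-tree.
 * Called with priv->lock held.
 */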
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

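/*
 * Insert a multicast group into the device's rb-tree, keyed by MGID.
 * Returns -EEXIST if a group with the same MGID is already present.
 * Called with priv->lock held.
 */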
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

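/*
 * Complete a successful join: record the returned MCMember record,
 * update the device MTU and Q_Key if this is the broadcast group,
 * attach the QP to the group (unless it is send-only), build the
 * multicast address handle and flush any packets queued on the group.
 */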
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_ah *ah;
	struct rdma_ah_attr av;
	int ret;
	int set_qkey = 0;
	int mtu;

	mcast->mcmember = *mcmember;

	/* Set the multicast MTU and cached Q_Key before we attach if it's
	 * the broadcast group.
	 */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		/* update priv members according to the new mcast */
		priv->broadcast->mcmember.qkey = mcmember->qkey;
		priv->broadcast->mcmember.mtu = mcmember->mtu;
		priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
		priv->broadcast->mcmember.rate = mcmember->rate;
		priv->broadcast->mcmember.sl = mcmember->sl;
		priv->broadcast->mcmember.flow_label = mcmember->flow_label;
		priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
		/* if the admin MTU and the mcast MTU are the same, assume both can be changed */
		mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
					   priv->broadcast->mcmember.mtu);
		if (priv->mcast_mtu == priv->admin_mtu)
			priv->admin_mtu = IPOIB_UD_MTU(mtu);
		priv->mcast_mtu = IPOIB_UD_MTU(mtu);
		rn->mtu = priv->mcast_mtu;

		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
				       be16_to_cpu(mcast->mcmember.mlid),
				       set_qkey, priv->qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	memset(&av, 0, sizeof(av));
	av.type = rdma_ah_find_type(priv->ca, priv->port);
	rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
	rdma_ah_set_port_num(&av, priv->port);
	rdma_ah_set_sl(&av, mcast->mcmember.sl);
	rdma_ah_set_static_rate(&av, mcast->mcmember.rate);

	rdma_ah_set_grh(&av, &mcast->mcmember.mgid,
			be32_to_cpu(mcast->mcmember.flow_label),
			0, mcast->mcmember.hop_limit,
			mcast->mcmember.traffic_class);

	ah = ipoib_create_ah(dev, priv->pd, &av);
	if (IS_ERR(ah)) {
		ipoib_warn(priv, "ib_address_create failed %pe\n", ah);
		/* use original error */
		return PTR_ERR(ah);
	}
	spin_lock_irq(&priv->lock);
	mcast->ah = ah;
	spin_unlock_irq(&priv->lock);

	ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
			mcast->mcmember.mgid.raw,
			mcast->ah->ah,
			be16_to_cpu(mcast->mcmember.mlid),
			mcast->mcmember.sl);

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		netif_tx_unlock_bh(dev);

		skb->dev = dev;

		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

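/*
 * Work item that turns the carrier on once the broadcast group has been
 * joined and the IB port is ACTIVE.  Runs on priv->wq so that taking
 * the RTNL lock here does not deadlock against the join path.
 */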
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}
	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.  However, ipoib_stop() will attempt to flush
	 * the workqueue while holding the rtnl lock, so loop
	 * on trylock until either we get the lock or we see
	 * FLAG_OPER_UP go away as that signals that we are bailing
	 * and can safely ignore the carrier on work.
	 */
	while (!rtnl_trylock()) {
		if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
			return;
		else
			msleep(20);
	}
	if (!ipoib_cm_admin_enabled(priv->dev))
		dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

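/*
 * Completion callback passed to ib_sa_join_multicast().  On success it
 * finishes the join; on failure it logs the error and either drops the
 * queued packets (send-only groups that already retried) or reschedules
 * the join thread with a backoff delay.
 */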
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
			test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
			"sendonly " : "",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET) {
		status = 0;
		goto out;
	}

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mcast->delay_until = jiffies;

		/*
		 * Defer carrier on work to priv->wq to avoid a
		 * deadlock on rtnl_lock here.  Requeue our multicast
		 * work too, which will end up happening right after
		 * our carrier on task work and will allow us to
		 * send out all of the non-broadcast joins
		 */
		if (mcast == priv->broadcast) {
			spin_lock_irq(&priv->lock);
			queue_work(priv->wq, &priv->carrier_on_task);
			__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
			goto out_locked;
		}
	} else {
		bool silent_fail =
			test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
			status == -EINVAL;

		if (mcast->logcount < 20) {
			if (status == -ETIMEDOUT || status == -EAGAIN ||
			    silent_fail) {
				ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
						test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
						mcast->mcmember.mgid.raw, status);
			} else {
				ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
					   test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
					   mcast->mcmember.mgid.raw, status);
			}

			if (!silent_fail)
				mcast->logcount++;
		}

		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
		    mcast->backoff >= 2) {
			/*
			 * We only retry sendonly joins once before we drop
			 * the packet and quit trying to deal with the
			 * group.  However, we leave the group in the
			 * mcast list as an unjoined group.  If we want to
			 * try joining again, we simply queue up a packet
			 * and restart the join thread.  The empty queue
			 * is why the join thread ignores this group.
			 */
			mcast->backoff = 1;
			netif_tx_lock_bh(dev);
			while (!skb_queue_empty(&mcast->pkt_queue)) {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
			}
			netif_tx_unlock_bh(dev);
		} else {
			spin_lock_irq(&priv->lock);
			/* Requeue this join task with a backoff delay */
			__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
			goto out_locked;
		}
	}
out:
	spin_lock_irq(&priv->lock);
out_locked:
	/*
	 * Make sure to set mcast->mc before we clear the busy flag to avoid
	 * racing with code that checks for BUSY before checking mcast->mc
	 */
	if (status)
		mcast->mc = NULL;
	else
		mcast->mc = multicast;
	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	spin_unlock_irq(&priv->lock);
	complete(&mcast->done);

	return status;
}

/*
 * Caller must hold 'priv->lock'
 */
static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_sa_multicast *multicast;
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	if (!priv->broadcast ||
	    !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return -EINVAL;

	init_completion(&mcast->done);
	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (mcast != priv->broadcast) {
		/*
		 * RFC 4391:
		 *  The MGID MUST use the same P_Key, Q_Key, SL, MTU,
		 *  and HopLimit as those used in the broadcast-GID.  The rest
		 *  of attributes SHOULD follow the values used in the
		 *  broadcast-GID as well.
		 */
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			IB_SA_MCMEMBER_REC_MTU |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			IB_SA_MCMEMBER_REC_RATE |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.mtu_selector = IB_SA_EQ;
		rec.mtu = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate = priv->broadcast->mcmember.rate;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;

		/*
		 * Send-only IB Multicast joins work at the core IB layer but
		 * require specific SM support.
		 * We can use such joins here only if the current SM supports that feature.
		 * However, if not, we emulate an Ethernet multicast send,
		 * which does not require a multicast subscription and will
		 * still send properly.  The most appropriate thing to
		 * do is to create the group if it doesn't exist as that
		 * most closely emulates the behavior, from a user space
		 * application perspective, of Ethernet multicast operation.
		 */
		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			rec.join_state = SENDONLY_FULLMEMBER_JOIN;
	}

	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_ATOMIC,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(multicast)) {
		ret = PTR_ERR(multicast);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
		/* Requeue this join task with a backoff delay */
		__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		complete(&mcast->done);
		return ret;
	}
	return 0;
}

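/*
 * Delayed work that walks the multicast list and issues SA joins.  It
 * first makes sure the broadcast group exists and is joined, then kicks
 * off a join for every group that is not yet joined, not busy, and not
 * waiting out a backoff delay.  If any group is still delayed it
 * reschedules itself for the earliest expiry.
 */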
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;
	struct ib_port_attr port_attr;
	unsigned long delay_until = 0;
	struct ipoib_mcast *mcast = NULL;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return;

	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
		ipoib_dbg(priv, "ib_query_port() failed\n");
		return;
	}
	if (port_attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
			  port_attr.state);
		return;
	}
	priv->local_lid = port_attr.lid;
	netif_addr_lock_bh(dev);

	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		netif_addr_unlock_bh(dev);
		return;
	}
	netif_addr_unlock_bh(dev);

	spin_lock_irq(&priv->lock);
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		goto out;

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		broadcast = ipoib_mcast_alloc(dev);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			/*
			 * Restart us after a 1 second delay to retry
			 * creating our broadcast group and attaching to
			 * it.  Until this succeeds, this ipoib dev is
			 * completely stalled (multicast wise).
			 */
			__ipoib_mcast_schedule_join_thread(priv, NULL, 1);
			goto out;
		}

		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
			mcast = priv->broadcast;
			if (mcast->backoff > 1 &&
			    time_before(jiffies, mcast->delay_until)) {
				delay_until = mcast->delay_until;
				mcast = NULL;
			}
		}
		goto out;
	}

	/*
	 * We'll never get here until the broadcast group is both allocated
	 * and attached
	 */
	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (IS_ERR_OR_NULL(mcast->mc) &&
		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
		    (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
		     !skb_queue_empty(&mcast->pkt_queue))) {
			if (mcast->backoff == 1 ||
			    time_after_eq(jiffies, mcast->delay_until)) {
				/* Found the next unjoined group */
				if (ipoib_mcast_join(dev, mcast)) {
					spin_unlock_irq(&priv->lock);
					return;
				}
			} else if (!delay_until ||
				   time_before(mcast->delay_until, delay_until))
				delay_until = mcast->delay_until;
		}
	}

	mcast = NULL;
	ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");

out:
	if (delay_until) {
		cancel_delayed_work(&priv->mcast_task);
		queue_delayed_work(priv->wq, &priv->mcast_task,
				   delay_until - jiffies);
	}
	if (mcast)
		ipoib_mcast_join(dev, mcast);

	spin_unlock_irq(&priv->lock);
}

void ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	spin_lock_irqsave(&priv->lock, flags);
	__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_mcast_stop_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	cancel_delayed_work_sync(&priv->mcast_task);
}

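/*
 * Leave a multicast group: release the SA multicast handle if one was
 * obtained and detach the QP from the group if it had been attached.
 * Callers are expected to have waited for any in-flight join to finish;
 * a still-set BUSY flag is reported as a warning.
 */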
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");

	if (!IS_ERR_OR_NULL(mcast->mc))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
				       be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	} else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
		ipoib_dbg(priv, "leaving with no mcmember but not a "
			  "SENDONLY join\n");

	return 0;
}

/*
 * Check if the multicast group is sendonly.  If so, remove it from the
 * maps and add it to the remove list.
 */
void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
					struct list_head *remove_list)
{
	/* Is this multicast? */
	if (*mgid == 0xff) {
		struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);

		if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			list_del(&mcast->list);
			rb_erase(&mcast->rb_node, &priv->multicast_tree);
			list_add_tail(&mcast->list, remove_list);
		}
	}
}

void ipoib_mcast_remove_list(struct list_head *remove_list)
{
	struct ipoib_mcast *mcast, *tmcast;

	/*
	 * make sure the in-flight joins have finished before we attempt
	 * to leave
	 */
	list_for_each_entry_safe(mcast, tmcast, remove_list, list)
		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			wait_for_completion(&mcast->done);

	list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

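/*
 * Transmit path for multicast packets.  If the group is already joined
 * (it has an address handle) the packet is sent immediately; otherwise
 * a send-only group is created on demand, the packet is queued on the
 * group, and the join thread is kicked to perform the join.
 */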
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;
	void *mgid = daddr + 4;

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast || !mcast->ah) {
		if (!mcast) {
			/* Let's create a new send only group now */
			ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
					mgid);

			mcast = ipoib_mcast_alloc(dev);
			if (!mcast) {
				ipoib_warn(priv, "unable to allocate memory "
					   "for multicast structure\n");
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
				goto unlock;
			}

			set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
			memcpy(mcast->mcmember.mgid.raw, mgid,
			       sizeof (union ib_gid));
			__ipoib_mcast_add(dev, mcast);
			list_add_tail(&mcast->list, &priv->multicast_list);
		}
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof(struct ipoib_pseudo_header));
			skb_queue_tail(&mcast->pkt_queue, skb);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
			__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
		}
	} else {
		struct ipoib_neigh *neigh;

		spin_unlock_irqrestore(&priv->lock, flags);
		neigh = ipoib_neigh_get(dev, daddr);
		spin_lock_irqsave(&priv->lock, flags);
		if (!neigh) {
			neigh = ipoib_neigh_alloc(daddr, dev);
			/* Make sure that the neigh will be added only
			 * once to mcast list.
			 */
			if (neigh && list_empty(&neigh->list)) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				neigh->ah->valid = 1;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
						IB_MULTICAST_QPN);
		if (neigh)
			ipoib_neigh_put(neigh);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

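/*
 * Flush every multicast group, including the broadcast group: move all
 * entries onto a private remove list under priv->lock, then leave and
 * free them outside the lock.
 */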
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	mutex_lock(&priv->mcast_mutex);
	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	ipoib_mcast_remove_list(&remove_list);
	mutex_unlock(&priv->mcast_mutex);
}

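/*
 * Validate a hardware multicast address against the device broadcast
 * address: the reserved QPN, prefix and scope bytes (0-5) and the lower
 * signature and P_Key bytes (7-9) must match; byte 6 is not checked.
 */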
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

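/*
 * Work item that resynchronizes the driver's multicast list with the
 * hardware multicast addresses the networking core reports for the
 * device: new addresses are added, stale entries are moved to a remove
 * list and released, and the join thread is rescheduled.
 */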
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	struct ib_sa_mcmember_rec rec;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		/*
		 * shortcut...on shutdown flush is called next, just
		 * let it do all the work
		 */
		return;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	netif_addr_lock_bh(dev);
	spin_lock_irq(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses.  We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore group which is directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock_irq(&priv->lock);
	netif_addr_unlock_bh(dev);

	ipoib_mcast_remove_list(&remove_list);

	/*
	 * Double check that we are still up
	 */
	if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		spin_lock_irq(&priv->lock);
		__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
		spin_unlock_irq(&priv->lock);
	}
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

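/*
 * Debug-only iterator over the multicast rb-tree, in ascending MGID
 * order.  ipoib_mcast_iter_next() advances to the next group and
 * snapshots its state; ipoib_mcast_iter_read() copies that snapshot out.
 */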
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid = iter->mgid;
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */