1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
4 | * operating system. INET is implemented using the BSD Socket |
5 | * interface as the means of communication with the user level. |
6 | * |
7 | * Routing netlink socket interface: protocol independent part. |
8 | * |
9 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
10 | * |
11 | * Fixes: |
12 | * Vitaly E. Lavrov RTA_OK arithmetic was wrong. |
13 | */ |
14 | |
15 | #include <linux/bitops.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/module.h> |
18 | #include <linux/types.h> |
19 | #include <linux/socket.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/timer.h> |
22 | #include <linux/string.h> |
23 | #include <linux/sockios.h> |
24 | #include <linux/net.h> |
25 | #include <linux/fcntl.h> |
26 | #include <linux/mm.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/interrupt.h> |
29 | #include <linux/capability.h> |
30 | #include <linux/skbuff.h> |
31 | #include <linux/init.h> |
32 | #include <linux/security.h> |
33 | #include <linux/mutex.h> |
34 | #include <linux/if_addr.h> |
35 | #include <linux/if_bridge.h> |
36 | #include <linux/if_vlan.h> |
37 | #include <linux/pci.h> |
38 | #include <linux/etherdevice.h> |
39 | #include <linux/bpf.h> |
40 | |
41 | #include <linux/uaccess.h> |
42 | |
43 | #include <linux/inet.h> |
44 | #include <linux/netdevice.h> |
45 | #include <net/ip.h> |
46 | #include <net/protocol.h> |
47 | #include <net/arp.h> |
48 | #include <net/route.h> |
49 | #include <net/udp.h> |
50 | #include <net/tcp.h> |
51 | #include <net/sock.h> |
52 | #include <net/pkt_sched.h> |
53 | #include <net/fib_rules.h> |
54 | #include <net/rtnetlink.h> |
55 | #include <net/net_namespace.h> |
56 | #include <net/netdev_lock.h> |
57 | #include <net/devlink.h> |
58 | #if IS_ENABLED(CONFIG_IPV6) |
59 | #include <net/addrconf.h> |
60 | #endif |
61 | #include <linux/dpll.h> |
62 | |
63 | #include "dev.h" |
64 | |
65 | #define RTNL_MAX_TYPE 50 |
66 | #define RTNL_SLAVE_MAX_TYPE 44 |
67 | |
68 | struct rtnl_link { |
69 | rtnl_doit_func doit; |
70 | rtnl_dumpit_func dumpit; |
71 | struct module *owner; |
72 | unsigned int flags; |
73 | struct rcu_head rcu; |
74 | }; |
75 | |
76 | static DEFINE_MUTEX(rtnl_mutex); |
77 | |
78 | void rtnl_lock(void) |
79 | { |
80 | mutex_lock(&rtnl_mutex); |
81 | } |
82 | EXPORT_SYMBOL(rtnl_lock); |
83 | |
84 | int rtnl_lock_interruptible(void) |
85 | { |
86 | return mutex_lock_interruptible(&rtnl_mutex); |
87 | } |
88 | |
89 | int rtnl_lock_killable(void) |
90 | { |
91 | return mutex_lock_killable(&rtnl_mutex); |
92 | } |
93 | |
94 | static struct sk_buff *defer_kfree_skb_list; |
95 | void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail) |
96 | { |
97 | if (head && tail) { |
98 | tail->next = defer_kfree_skb_list; |
99 | defer_kfree_skb_list = head; |
100 | } |
101 | } |
102 | EXPORT_SYMBOL(rtnl_kfree_skbs); |
103 | |
104 | void __rtnl_unlock(void) |
105 | { |
106 | struct sk_buff *head = defer_kfree_skb_list; |
107 | |
108 | defer_kfree_skb_list = NULL; |
109 | |
110 | /* Ensure that we didn't actually add any TODO item when __rtnl_unlock() |
111 | * is used. In some places, e.g. in cfg80211, we have code that will do |
112 | * something like |
113 | * rtnl_lock() |
114 | * wiphy_lock() |
115 | * ... |
116 | * rtnl_unlock() |
117 | * |
118 | * and because netdev_run_todo() acquires the RTNL for items on the list |
119 | * we could cause a situation such as this: |
120 | * Thread 1 Thread 2 |
121 | * rtnl_lock() |
122 | * unregister_netdevice() |
123 | * __rtnl_unlock() |
124 | * rtnl_lock() |
125 | * wiphy_lock() |
126 | * rtnl_unlock() |
127 | * netdev_run_todo() |
128 | * __rtnl_unlock() |
129 | * |
130 | * // list not empty now |
131 | * // because of thread 2 |
132 | * rtnl_lock() |
133 | * while (!list_empty(...)) |
134 | * rtnl_lock() |
135 | * wiphy_lock() |
136 | * **** DEADLOCK **** |
137 | * |
138 | * However, usage of __rtnl_unlock() is rare, and so we can ensure that |
139 | * it's not used in cases where something is added to do the list. |
140 | */ |
141 | WARN_ON(!list_empty(&net_todo_list)); |
142 | |
143 | mutex_unlock(lock: &rtnl_mutex); |
144 | |
145 | while (head) { |
146 | struct sk_buff *next = head->next; |
147 | |
148 | kfree_skb(skb: head); |
149 | cond_resched(); |
150 | head = next; |
151 | } |
152 | } |
153 | |
154 | void rtnl_unlock(void) |
155 | { |
156 | /* This fellow will unlock it for us. */ |
157 | netdev_run_todo(); |
158 | } |
159 | EXPORT_SYMBOL(rtnl_unlock); |
160 | |
161 | int rtnl_trylock(void) |
162 | { |
163 | return mutex_trylock(&rtnl_mutex); |
164 | } |
165 | EXPORT_SYMBOL(rtnl_trylock); |
166 | |
167 | int rtnl_is_locked(void) |
168 | { |
169 | return mutex_is_locked(lock: &rtnl_mutex); |
170 | } |
171 | EXPORT_SYMBOL(rtnl_is_locked); |
172 | |
173 | bool refcount_dec_and_rtnl_lock(refcount_t *r) |
174 | { |
175 | return refcount_dec_and_mutex_lock(r, lock: &rtnl_mutex); |
176 | } |
177 | EXPORT_SYMBOL(refcount_dec_and_rtnl_lock); |
178 | |
179 | #ifdef CONFIG_PROVE_LOCKING |
180 | bool lockdep_rtnl_is_held(void) |
181 | { |
182 | return lockdep_is_held(&rtnl_mutex); |
183 | } |
184 | EXPORT_SYMBOL(lockdep_rtnl_is_held); |
185 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
186 | |
187 | #ifdef CONFIG_DEBUG_NET_SMALL_RTNL |
188 | void __rtnl_net_lock(struct net *net) |
189 | { |
190 | ASSERT_RTNL(); |
191 | |
192 | mutex_lock(&net->rtnl_mutex); |
193 | } |
194 | EXPORT_SYMBOL(__rtnl_net_lock); |
195 | |
196 | void __rtnl_net_unlock(struct net *net) |
197 | { |
198 | ASSERT_RTNL(); |
199 | |
200 | mutex_unlock(lock: &net->rtnl_mutex); |
201 | } |
202 | EXPORT_SYMBOL(__rtnl_net_unlock); |
203 | |
204 | void rtnl_net_lock(struct net *net) |
205 | { |
206 | rtnl_lock(); |
207 | __rtnl_net_lock(net); |
208 | } |
209 | EXPORT_SYMBOL(rtnl_net_lock); |
210 | |
211 | void rtnl_net_unlock(struct net *net) |
212 | { |
213 | __rtnl_net_unlock(net); |
214 | rtnl_unlock(); |
215 | } |
216 | EXPORT_SYMBOL(rtnl_net_unlock); |
217 | |
218 | int rtnl_net_trylock(struct net *net) |
219 | { |
220 | int ret = rtnl_trylock(); |
221 | |
222 | if (ret) |
223 | __rtnl_net_lock(net); |
224 | |
225 | return ret; |
226 | } |
227 | EXPORT_SYMBOL(rtnl_net_trylock); |
228 | |
229 | int rtnl_net_lock_killable(struct net *net) |
230 | { |
231 | int ret = rtnl_lock_killable(); |
232 | |
233 | if (!ret) |
234 | __rtnl_net_lock(net); |
235 | |
236 | return ret; |
237 | } |
238 | |
239 | static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b) |
240 | { |
241 | if (net_eq(net1: net_a, net2: net_b)) |
242 | return 0; |
243 | |
244 | /* always init_net first */ |
245 | if (net_eq(net1: net_a, net2: &init_net)) |
246 | return -1; |
247 | |
248 | if (net_eq(net1: net_b, net2: &init_net)) |
249 | return 1; |
250 | |
251 | /* otherwise lock in ascending order */ |
252 | return net_a < net_b ? -1 : 1; |
253 | } |
254 | |
255 | int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b) |
256 | { |
257 | const struct net *net_a, *net_b; |
258 | |
259 | net_a = container_of(a, struct net, rtnl_mutex.dep_map); |
260 | net_b = container_of(b, struct net, rtnl_mutex.dep_map); |
261 | |
262 | return rtnl_net_cmp_locks(net_a, net_b); |
263 | } |
264 | |
265 | bool rtnl_net_is_locked(struct net *net) |
266 | { |
267 | return rtnl_is_locked() && mutex_is_locked(lock: &net->rtnl_mutex); |
268 | } |
269 | EXPORT_SYMBOL(rtnl_net_is_locked); |
270 | |
271 | bool lockdep_rtnl_net_is_held(struct net *net) |
272 | { |
273 | return lockdep_rtnl_is_held() && lockdep_is_held(&net->rtnl_mutex); |
274 | } |
275 | EXPORT_SYMBOL(lockdep_rtnl_net_is_held); |
276 | #else |
277 | static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b) |
278 | { |
279 | /* No need to swap */ |
280 | return -1; |
281 | } |
282 | #endif |
283 | |
284 | struct rtnl_nets { |
285 | /* ->newlink() needs to freeze 3 netns at most; |
286 | * 2 for the new device, 1 for its peer. |
287 | */ |
288 | struct net *net[3]; |
289 | unsigned char len; |
290 | }; |
291 | |
292 | static void rtnl_nets_init(struct rtnl_nets *rtnl_nets) |
293 | { |
294 | memset(rtnl_nets, 0, sizeof(*rtnl_nets)); |
295 | } |
296 | |
297 | static void rtnl_nets_destroy(struct rtnl_nets *rtnl_nets) |
298 | { |
299 | int i; |
300 | |
301 | for (i = 0; i < rtnl_nets->len; i++) { |
302 | put_net(net: rtnl_nets->net[i]); |
303 | rtnl_nets->net[i] = NULL; |
304 | } |
305 | |
306 | rtnl_nets->len = 0; |
307 | } |
308 | |
309 | /** |
310 | * rtnl_nets_add - Add netns to be locked before ->newlink(). |
311 | * |
312 | * @rtnl_nets: rtnl_nets pointer passed to ->get_peer_net(). |
313 | * @net: netns pointer with an extra refcnt held. |
314 | * |
315 | * The extra refcnt is released in rtnl_nets_destroy(). |
316 | */ |
317 | static void rtnl_nets_add(struct rtnl_nets *rtnl_nets, struct net *net) |
318 | { |
319 | int i; |
320 | |
321 | DEBUG_NET_WARN_ON_ONCE(rtnl_nets->len == ARRAY_SIZE(rtnl_nets->net)); |
322 | |
323 | for (i = 0; i < rtnl_nets->len; i++) { |
324 | switch (rtnl_net_cmp_locks(net_a: rtnl_nets->net[i], net_b: net)) { |
325 | case 0: |
326 | put_net(net); |
327 | return; |
328 | case 1: |
329 | swap(rtnl_nets->net[i], net); |
330 | } |
331 | } |
332 | |
333 | rtnl_nets->net[i] = net; |
334 | rtnl_nets->len++; |
335 | } |
336 | |
337 | static void rtnl_nets_lock(struct rtnl_nets *rtnl_nets) |
338 | { |
339 | int i; |
340 | |
341 | rtnl_lock(); |
342 | |
343 | for (i = 0; i < rtnl_nets->len; i++) |
344 | __rtnl_net_lock(rtnl_nets->net[i]); |
345 | } |
346 | |
347 | static void rtnl_nets_unlock(struct rtnl_nets *rtnl_nets) |
348 | { |
349 | int i; |
350 | |
351 | for (i = 0; i < rtnl_nets->len; i++) |
352 | __rtnl_net_unlock(rtnl_nets->net[i]); |
353 | |
354 | rtnl_unlock(); |
355 | } |
356 | |
357 | static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; |
358 | |
359 | static inline int rtm_msgindex(int msgtype) |
360 | { |
361 | int msgindex = msgtype - RTM_BASE; |
362 | |
363 | /* |
364 | * msgindex < 0 implies someone tried to register a netlink |
365 | * control code. msgindex >= RTM_NR_MSGTYPES may indicate that |
366 | * the message type has not been added to linux/rtnetlink.h |
367 | */ |
368 | BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES); |
369 | |
370 | return msgindex; |
371 | } |
372 | |
373 | static struct rtnl_link *rtnl_get_link(int protocol, int msgtype) |
374 | { |
375 | struct rtnl_link __rcu **tab; |
376 | |
377 | if (protocol >= ARRAY_SIZE(rtnl_msg_handlers)) |
378 | protocol = PF_UNSPEC; |
379 | |
380 | tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]); |
381 | if (!tab) |
382 | tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]); |
383 | |
384 | return rcu_dereference_rtnl(tab[msgtype]); |
385 | } |
386 | |
387 | static int rtnl_register_internal(struct module *owner, |
388 | int protocol, int msgtype, |
389 | rtnl_doit_func doit, rtnl_dumpit_func dumpit, |
390 | unsigned int flags) |
391 | { |
392 | struct rtnl_link *link, *old; |
393 | struct rtnl_link __rcu **tab; |
394 | int msgindex; |
395 | int ret = -ENOBUFS; |
396 | |
397 | BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); |
398 | msgindex = rtm_msgindex(msgtype); |
399 | |
400 | rtnl_lock(); |
401 | tab = rtnl_dereference(rtnl_msg_handlers[protocol]); |
402 | if (tab == NULL) { |
403 | tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL); |
404 | if (!tab) |
405 | goto unlock; |
406 | |
407 | /* ensures we see the 0 stores */ |
408 | rcu_assign_pointer(rtnl_msg_handlers[protocol], tab); |
409 | } |
410 | |
411 | old = rtnl_dereference(tab[msgindex]); |
412 | if (old) { |
413 | link = kmemdup(old, sizeof(*old), GFP_KERNEL); |
414 | if (!link) |
415 | goto unlock; |
416 | } else { |
417 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
418 | if (!link) |
419 | goto unlock; |
420 | } |
421 | |
422 | WARN_ON(link->owner && link->owner != owner); |
423 | link->owner = owner; |
424 | |
425 | WARN_ON(doit && link->doit && link->doit != doit); |
426 | if (doit) |
427 | link->doit = doit; |
428 | WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit); |
429 | if (dumpit) |
430 | link->dumpit = dumpit; |
431 | |
432 | WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL && |
433 | (flags & RTNL_FLAG_BULK_DEL_SUPPORTED)); |
434 | link->flags |= flags; |
435 | |
436 | /* publish protocol:msgtype */ |
437 | rcu_assign_pointer(tab[msgindex], link); |
438 | ret = 0; |
439 | if (old) |
440 | kfree_rcu(old, rcu); |
441 | unlock: |
442 | rtnl_unlock(); |
443 | return ret; |
444 | } |
445 | |
446 | /** |
447 | * rtnl_unregister - Unregister a rtnetlink message type |
448 | * @protocol: Protocol family or PF_UNSPEC |
449 | * @msgtype: rtnetlink message type |
450 | * |
451 | * Returns 0 on success or a negative error code. |
452 | */ |
453 | static int rtnl_unregister(int protocol, int msgtype) |
454 | { |
455 | struct rtnl_link __rcu **tab; |
456 | struct rtnl_link *link; |
457 | int msgindex; |
458 | |
459 | BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); |
460 | msgindex = rtm_msgindex(msgtype); |
461 | |
462 | rtnl_lock(); |
463 | tab = rtnl_dereference(rtnl_msg_handlers[protocol]); |
464 | if (!tab) { |
465 | rtnl_unlock(); |
466 | return -ENOENT; |
467 | } |
468 | |
469 | link = rcu_replace_pointer_rtnl(tab[msgindex], NULL); |
470 | rtnl_unlock(); |
471 | |
472 | kfree_rcu(link, rcu); |
473 | |
474 | return 0; |
475 | } |
476 | |
477 | /** |
478 | * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol |
479 | * @protocol : Protocol family or PF_UNSPEC |
480 | * |
481 | * Identical to calling rtnl_unregster() for all registered message types |
482 | * of a certain protocol family. |
483 | */ |
484 | void rtnl_unregister_all(int protocol) |
485 | { |
486 | struct rtnl_link __rcu **tab; |
487 | struct rtnl_link *link; |
488 | int msgindex; |
489 | |
490 | BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); |
491 | |
492 | rtnl_lock(); |
493 | tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL); |
494 | if (!tab) { |
495 | rtnl_unlock(); |
496 | return; |
497 | } |
498 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { |
499 | link = rcu_replace_pointer_rtnl(tab[msgindex], NULL); |
500 | kfree_rcu(link, rcu); |
501 | } |
502 | rtnl_unlock(); |
503 | |
504 | synchronize_net(); |
505 | |
506 | kfree(objp: tab); |
507 | } |
508 | EXPORT_SYMBOL_GPL(rtnl_unregister_all); |
509 | |
510 | /** |
511 | * __rtnl_register_many - Register rtnetlink message types |
512 | * @handlers: Array of struct rtnl_msg_handlers |
513 | * @n: The length of @handlers |
514 | * |
515 | * Registers the specified function pointers (at least one of them has |
516 | * to be non-NULL) to be called whenever a request message for the |
517 | * specified protocol family and message type is received. |
518 | * |
519 | * The special protocol family PF_UNSPEC may be used to define fallback |
520 | * function pointers for the case when no entry for the specific protocol |
521 | * family exists. |
522 | * |
523 | * When one element of @handlers fails to register, |
524 | * 1) built-in: panics. |
525 | * 2) modules : the previous successful registrations are unwinded |
526 | * and an error is returned. |
527 | * |
528 | * Use rtnl_register_many(). |
529 | */ |
530 | int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n) |
531 | { |
532 | const struct rtnl_msg_handler *handler; |
533 | int i, err; |
534 | |
535 | for (i = 0, handler = handlers; i < n; i++, handler++) { |
536 | err = rtnl_register_internal(owner: handler->owner, protocol: handler->protocol, |
537 | msgtype: handler->msgtype, doit: handler->doit, |
538 | dumpit: handler->dumpit, flags: handler->flags); |
539 | if (err) { |
540 | if (!handler->owner) |
541 | panic(fmt: "Unable to register rtnetlink message " |
542 | "handlers, %pS\n" , handlers); |
543 | |
544 | __rtnl_unregister_many(handlers, n: i); |
545 | break; |
546 | } |
547 | } |
548 | |
549 | return err; |
550 | } |
551 | EXPORT_SYMBOL_GPL(__rtnl_register_many); |
552 | |
553 | void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n) |
554 | { |
555 | const struct rtnl_msg_handler *handler; |
556 | int i; |
557 | |
558 | for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--) |
559 | rtnl_unregister(protocol: handler->protocol, msgtype: handler->msgtype); |
560 | } |
561 | EXPORT_SYMBOL_GPL(__rtnl_unregister_many); |
562 | |
563 | static DEFINE_MUTEX(link_ops_mutex); |
564 | static LIST_HEAD(link_ops); |
565 | |
566 | static struct rtnl_link_ops *rtnl_link_ops_get(const char *kind, int *srcu_index) |
567 | { |
568 | struct rtnl_link_ops *ops; |
569 | |
570 | rcu_read_lock(); |
571 | |
572 | list_for_each_entry_rcu(ops, &link_ops, list) { |
573 | if (!strcmp(ops->kind, kind)) { |
574 | *srcu_index = srcu_read_lock(ssp: &ops->srcu); |
575 | goto unlock; |
576 | } |
577 | } |
578 | |
579 | ops = NULL; |
580 | unlock: |
581 | rcu_read_unlock(); |
582 | |
583 | return ops; |
584 | } |
585 | |
586 | static void rtnl_link_ops_put(struct rtnl_link_ops *ops, int srcu_index) |
587 | { |
588 | srcu_read_unlock(ssp: &ops->srcu, idx: srcu_index); |
589 | } |
590 | |
591 | /** |
592 | * rtnl_link_register - Register rtnl_link_ops with rtnetlink. |
593 | * @ops: struct rtnl_link_ops * to register |
594 | * |
595 | * Returns 0 on success or a negative error code. |
596 | */ |
597 | int rtnl_link_register(struct rtnl_link_ops *ops) |
598 | { |
599 | struct rtnl_link_ops *tmp; |
600 | int err; |
601 | |
602 | /* Sanity-check max sizes to avoid stack buffer overflow. */ |
603 | if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE || |
604 | ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)) |
605 | return -EINVAL; |
606 | |
607 | /* The check for alloc/setup is here because if ops |
608 | * does not have that filled up, it is not possible |
609 | * to use the ops for creating device. So do not |
610 | * fill up dellink as well. That disables rtnl_dellink. |
611 | */ |
612 | if ((ops->alloc || ops->setup) && !ops->dellink) |
613 | ops->dellink = unregister_netdevice_queue; |
614 | |
615 | err = init_srcu_struct(&ops->srcu); |
616 | if (err) |
617 | return err; |
618 | |
619 | mutex_lock(&link_ops_mutex); |
620 | |
621 | list_for_each_entry(tmp, &link_ops, list) { |
622 | if (!strcmp(ops->kind, tmp->kind)) { |
623 | err = -EEXIST; |
624 | goto unlock; |
625 | } |
626 | } |
627 | |
628 | list_add_tail_rcu(new: &ops->list, head: &link_ops); |
629 | unlock: |
630 | mutex_unlock(lock: &link_ops_mutex); |
631 | |
632 | return err; |
633 | } |
634 | EXPORT_SYMBOL_GPL(rtnl_link_register); |
635 | |
636 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
637 | { |
638 | struct net_device *dev; |
639 | LIST_HEAD(list_kill); |
640 | |
641 | for_each_netdev(net, dev) { |
642 | if (dev->rtnl_link_ops == ops) |
643 | ops->dellink(dev, &list_kill); |
644 | } |
645 | unregister_netdevice_many(head: &list_kill); |
646 | } |
647 | |
648 | /* Return with the rtnl_lock held when there are no network |
649 | * devices unregistering in any network namespace. |
650 | */ |
651 | static void rtnl_lock_unregistering_all(void) |
652 | { |
653 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
654 | |
655 | add_wait_queue(wq_head: &netdev_unregistering_wq, wq_entry: &wait); |
656 | for (;;) { |
657 | rtnl_lock(); |
658 | /* We held write locked pernet_ops_rwsem, and parallel |
659 | * setup_net() and cleanup_net() are not possible. |
660 | */ |
661 | if (!atomic_read(v: &dev_unreg_count)) |
662 | break; |
663 | __rtnl_unlock(); |
664 | |
665 | wait_woken(wq_entry: &wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
666 | } |
667 | remove_wait_queue(wq_head: &netdev_unregistering_wq, wq_entry: &wait); |
668 | } |
669 | |
670 | /** |
671 | * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. |
672 | * @ops: struct rtnl_link_ops * to unregister |
673 | */ |
674 | void rtnl_link_unregister(struct rtnl_link_ops *ops) |
675 | { |
676 | struct net *net; |
677 | |
678 | mutex_lock(&link_ops_mutex); |
679 | list_del_rcu(entry: &ops->list); |
680 | mutex_unlock(lock: &link_ops_mutex); |
681 | |
682 | synchronize_srcu(ssp: &ops->srcu); |
683 | cleanup_srcu_struct(ssp: &ops->srcu); |
684 | |
685 | /* Close the race with setup_net() and cleanup_net() */ |
686 | down_write(sem: &pernet_ops_rwsem); |
687 | rtnl_lock_unregistering_all(); |
688 | |
689 | for_each_net(net) |
690 | __rtnl_kill_links(net, ops); |
691 | |
692 | rtnl_unlock(); |
693 | up_write(sem: &pernet_ops_rwsem); |
694 | } |
695 | EXPORT_SYMBOL_GPL(rtnl_link_unregister); |
696 | |
697 | static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev) |
698 | { |
699 | struct net_device *master_dev; |
700 | const struct rtnl_link_ops *ops; |
701 | size_t size = 0; |
702 | |
703 | rcu_read_lock(); |
704 | |
705 | master_dev = netdev_master_upper_dev_get_rcu(dev: (struct net_device *)dev); |
706 | if (!master_dev) |
707 | goto out; |
708 | |
709 | ops = master_dev->rtnl_link_ops; |
710 | if (!ops || !ops->get_slave_size) |
711 | goto out; |
712 | /* IFLA_INFO_SLAVE_DATA + nested data */ |
713 | size = nla_total_size(payload: sizeof(struct nlattr)) + |
714 | ops->get_slave_size(master_dev, dev); |
715 | |
716 | out: |
717 | rcu_read_unlock(); |
718 | return size; |
719 | } |
720 | |
721 | static size_t rtnl_link_get_size(const struct net_device *dev) |
722 | { |
723 | const struct rtnl_link_ops *ops = dev->rtnl_link_ops; |
724 | size_t size; |
725 | |
726 | if (!ops) |
727 | return 0; |
728 | |
729 | size = nla_total_size(payload: sizeof(struct nlattr)) + /* IFLA_LINKINFO */ |
730 | nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ |
731 | |
732 | if (ops->get_size) |
733 | /* IFLA_INFO_DATA + nested data */ |
734 | size += nla_total_size(payload: sizeof(struct nlattr)) + |
735 | ops->get_size(dev); |
736 | |
737 | if (ops->get_xstats_size) |
738 | /* IFLA_INFO_XSTATS */ |
739 | size += nla_total_size(payload: ops->get_xstats_size(dev)); |
740 | |
741 | size += rtnl_link_get_slave_info_data_size(dev); |
742 | |
743 | return size; |
744 | } |
745 | |
746 | static LIST_HEAD(rtnl_af_ops); |
747 | |
748 | static struct rtnl_af_ops *rtnl_af_lookup(const int family, int *srcu_index) |
749 | { |
750 | struct rtnl_af_ops *ops; |
751 | |
752 | ASSERT_RTNL(); |
753 | |
754 | rcu_read_lock(); |
755 | |
756 | list_for_each_entry_rcu(ops, &rtnl_af_ops, list) { |
757 | if (ops->family == family) { |
758 | *srcu_index = srcu_read_lock(ssp: &ops->srcu); |
759 | goto unlock; |
760 | } |
761 | } |
762 | |
763 | ops = NULL; |
764 | unlock: |
765 | rcu_read_unlock(); |
766 | |
767 | return ops; |
768 | } |
769 | |
770 | static void rtnl_af_put(struct rtnl_af_ops *ops, int srcu_index) |
771 | { |
772 | srcu_read_unlock(ssp: &ops->srcu, idx: srcu_index); |
773 | } |
774 | |
775 | /** |
776 | * rtnl_af_register - Register rtnl_af_ops with rtnetlink. |
777 | * @ops: struct rtnl_af_ops * to register |
778 | * |
779 | * Return: 0 on success or a negative error code. |
780 | */ |
781 | int rtnl_af_register(struct rtnl_af_ops *ops) |
782 | { |
783 | int err = init_srcu_struct(&ops->srcu); |
784 | |
785 | if (err) |
786 | return err; |
787 | |
788 | rtnl_lock(); |
789 | list_add_tail_rcu(new: &ops->list, head: &rtnl_af_ops); |
790 | rtnl_unlock(); |
791 | |
792 | return 0; |
793 | } |
794 | EXPORT_SYMBOL_GPL(rtnl_af_register); |
795 | |
796 | /** |
797 | * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink. |
798 | * @ops: struct rtnl_af_ops * to unregister |
799 | */ |
800 | void rtnl_af_unregister(struct rtnl_af_ops *ops) |
801 | { |
802 | rtnl_lock(); |
803 | list_del_rcu(entry: &ops->list); |
804 | rtnl_unlock(); |
805 | |
806 | synchronize_rcu(); |
807 | synchronize_srcu(ssp: &ops->srcu); |
808 | cleanup_srcu_struct(ssp: &ops->srcu); |
809 | } |
810 | EXPORT_SYMBOL_GPL(rtnl_af_unregister); |
811 | |
812 | static size_t rtnl_link_get_af_size(const struct net_device *dev, |
813 | u32 ext_filter_mask) |
814 | { |
815 | struct rtnl_af_ops *af_ops; |
816 | size_t size; |
817 | |
818 | /* IFLA_AF_SPEC */ |
819 | size = nla_total_size(payload: sizeof(struct nlattr)); |
820 | |
821 | rcu_read_lock(); |
822 | list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { |
823 | if (af_ops->get_link_af_size) { |
824 | /* AF_* + nested data */ |
825 | size += nla_total_size(payload: sizeof(struct nlattr)) + |
826 | af_ops->get_link_af_size(dev, ext_filter_mask); |
827 | } |
828 | } |
829 | rcu_read_unlock(); |
830 | |
831 | return size; |
832 | } |
833 | |
834 | static bool rtnl_have_link_slave_info(const struct net_device *dev) |
835 | { |
836 | struct net_device *master_dev; |
837 | bool ret = false; |
838 | |
839 | rcu_read_lock(); |
840 | |
841 | master_dev = netdev_master_upper_dev_get_rcu(dev: (struct net_device *)dev); |
842 | if (master_dev && master_dev->rtnl_link_ops) |
843 | ret = true; |
844 | rcu_read_unlock(); |
845 | return ret; |
846 | } |
847 | |
848 | static int rtnl_link_slave_info_fill(struct sk_buff *skb, |
849 | const struct net_device *dev) |
850 | { |
851 | struct net_device *master_dev; |
852 | const struct rtnl_link_ops *ops; |
853 | struct nlattr *slave_data; |
854 | int err; |
855 | |
856 | master_dev = netdev_master_upper_dev_get(dev: (struct net_device *) dev); |
857 | if (!master_dev) |
858 | return 0; |
859 | ops = master_dev->rtnl_link_ops; |
860 | if (!ops) |
861 | return 0; |
862 | if (nla_put_string(skb, attrtype: IFLA_INFO_SLAVE_KIND, str: ops->kind) < 0) |
863 | return -EMSGSIZE; |
864 | if (ops->fill_slave_info) { |
865 | slave_data = nla_nest_start_noflag(skb, attrtype: IFLA_INFO_SLAVE_DATA); |
866 | if (!slave_data) |
867 | return -EMSGSIZE; |
868 | err = ops->fill_slave_info(skb, master_dev, dev); |
869 | if (err < 0) |
870 | goto err_cancel_slave_data; |
871 | nla_nest_end(skb, start: slave_data); |
872 | } |
873 | return 0; |
874 | |
875 | err_cancel_slave_data: |
876 | nla_nest_cancel(skb, start: slave_data); |
877 | return err; |
878 | } |
879 | |
880 | static int rtnl_link_info_fill(struct sk_buff *skb, |
881 | const struct net_device *dev) |
882 | { |
883 | const struct rtnl_link_ops *ops = dev->rtnl_link_ops; |
884 | struct nlattr *data; |
885 | int err; |
886 | |
887 | if (!ops) |
888 | return 0; |
889 | if (nla_put_string(skb, attrtype: IFLA_INFO_KIND, str: ops->kind) < 0) |
890 | return -EMSGSIZE; |
891 | if (ops->fill_xstats) { |
892 | err = ops->fill_xstats(skb, dev); |
893 | if (err < 0) |
894 | return err; |
895 | } |
896 | if (ops->fill_info) { |
897 | data = nla_nest_start_noflag(skb, attrtype: IFLA_INFO_DATA); |
898 | if (data == NULL) |
899 | return -EMSGSIZE; |
900 | err = ops->fill_info(skb, dev); |
901 | if (err < 0) |
902 | goto err_cancel_data; |
903 | nla_nest_end(skb, start: data); |
904 | } |
905 | return 0; |
906 | |
907 | err_cancel_data: |
908 | nla_nest_cancel(skb, start: data); |
909 | return err; |
910 | } |
911 | |
912 | static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) |
913 | { |
914 | struct nlattr *linkinfo; |
915 | int err = -EMSGSIZE; |
916 | |
917 | linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO); |
918 | if (linkinfo == NULL) |
919 | goto out; |
920 | |
921 | err = rtnl_link_info_fill(skb, dev); |
922 | if (err < 0) |
923 | goto err_cancel_link; |
924 | |
925 | err = rtnl_link_slave_info_fill(skb, dev); |
926 | if (err < 0) |
927 | goto err_cancel_link; |
928 | |
929 | nla_nest_end(skb, start: linkinfo); |
930 | return 0; |
931 | |
932 | err_cancel_link: |
933 | nla_nest_cancel(skb, start: linkinfo); |
934 | out: |
935 | return err; |
936 | } |
937 | |
938 | int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) |
939 | { |
940 | struct sock *rtnl = net->rtnl; |
941 | |
942 | return nlmsg_notify(sk: rtnl, skb, portid: pid, group, report: echo, GFP_KERNEL); |
943 | } |
944 | |
945 | int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) |
946 | { |
947 | struct sock *rtnl = net->rtnl; |
948 | |
949 | return nlmsg_unicast(sk: rtnl, skb, portid: pid); |
950 | } |
951 | EXPORT_SYMBOL(rtnl_unicast); |
952 | |
953 | void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, |
954 | const struct nlmsghdr *nlh, gfp_t flags) |
955 | { |
956 | struct sock *rtnl = net->rtnl; |
957 | |
958 | nlmsg_notify(sk: rtnl, skb, portid: pid, group, report: nlmsg_report(nlh), flags); |
959 | } |
960 | EXPORT_SYMBOL(rtnl_notify); |
961 | |
962 | void rtnl_set_sk_err(struct net *net, u32 group, int error) |
963 | { |
964 | struct sock *rtnl = net->rtnl; |
965 | |
966 | netlink_set_err(ssk: rtnl, portid: 0, group, code: error); |
967 | } |
968 | EXPORT_SYMBOL(rtnl_set_sk_err); |
969 | |
970 | int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) |
971 | { |
972 | struct nlattr *mx; |
973 | int i, valid = 0; |
974 | |
975 | /* nothing is dumped for dst_default_metrics, so just skip the loop */ |
976 | if (metrics == dst_default_metrics.metrics) |
977 | return 0; |
978 | |
979 | mx = nla_nest_start_noflag(skb, attrtype: RTA_METRICS); |
980 | if (mx == NULL) |
981 | return -ENOBUFS; |
982 | |
983 | for (i = 0; i < RTAX_MAX; i++) { |
984 | if (metrics[i]) { |
985 | if (i == RTAX_CC_ALGO - 1) { |
986 | char tmp[TCP_CA_NAME_MAX], *name; |
987 | |
988 | name = tcp_ca_get_name_by_key(key: metrics[i], buffer: tmp); |
989 | if (!name) |
990 | continue; |
991 | if (nla_put_string(skb, attrtype: i + 1, str: name)) |
992 | goto nla_put_failure; |
993 | } else if (i == RTAX_FEATURES - 1) { |
994 | u32 user_features = metrics[i] & RTAX_FEATURE_MASK; |
995 | |
996 | if (!user_features) |
997 | continue; |
998 | BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK); |
999 | if (nla_put_u32(skb, attrtype: i + 1, value: user_features)) |
1000 | goto nla_put_failure; |
1001 | } else { |
1002 | if (nla_put_u32(skb, attrtype: i + 1, value: metrics[i])) |
1003 | goto nla_put_failure; |
1004 | } |
1005 | valid++; |
1006 | } |
1007 | } |
1008 | |
1009 | if (!valid) { |
1010 | nla_nest_cancel(skb, start: mx); |
1011 | return 0; |
1012 | } |
1013 | |
1014 | return nla_nest_end(skb, start: mx); |
1015 | |
1016 | nla_put_failure: |
1017 | nla_nest_cancel(skb, start: mx); |
1018 | return -EMSGSIZE; |
1019 | } |
1020 | EXPORT_SYMBOL(rtnetlink_put_metrics); |
1021 | |
1022 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, |
1023 | long expires, u32 error) |
1024 | { |
1025 | struct rta_cacheinfo ci = { |
1026 | .rta_error = error, |
1027 | .rta_id = id, |
1028 | }; |
1029 | |
1030 | if (dst) { |
1031 | ci.rta_lastuse = jiffies_delta_to_clock_t(delta: jiffies - dst->lastuse); |
1032 | ci.rta_used = dst->__use; |
1033 | ci.rta_clntref = rcuref_read(ref: &dst->__rcuref); |
1034 | } |
1035 | if (expires) { |
1036 | unsigned long clock; |
1037 | |
1038 | clock = jiffies_to_clock_t(abs(expires)); |
1039 | clock = min_t(unsigned long, clock, INT_MAX); |
1040 | ci.rta_expires = (expires > 0) ? clock : -clock; |
1041 | } |
1042 | return nla_put(skb, attrtype: RTA_CACHEINFO, attrlen: sizeof(ci), data: &ci); |
1043 | } |
1044 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); |
1045 | |
1046 | void netif_set_operstate(struct net_device *dev, int newstate) |
1047 | { |
1048 | unsigned int old = READ_ONCE(dev->operstate); |
1049 | |
1050 | do { |
1051 | if (old == newstate) |
1052 | return; |
1053 | } while (!try_cmpxchg(&dev->operstate, &old, newstate)); |
1054 | |
1055 | netif_state_change(dev); |
1056 | } |
1057 | EXPORT_SYMBOL(netif_set_operstate); |
1058 | |
1059 | static void set_operstate(struct net_device *dev, unsigned char transition) |
1060 | { |
1061 | unsigned char operstate = READ_ONCE(dev->operstate); |
1062 | |
1063 | switch (transition) { |
1064 | case IF_OPER_UP: |
1065 | if ((operstate == IF_OPER_DORMANT || |
1066 | operstate == IF_OPER_TESTING || |
1067 | operstate == IF_OPER_UNKNOWN) && |
1068 | !netif_dormant(dev) && !netif_testing(dev)) |
1069 | operstate = IF_OPER_UP; |
1070 | break; |
1071 | |
1072 | case IF_OPER_TESTING: |
1073 | if (netif_oper_up(dev)) |
1074 | operstate = IF_OPER_TESTING; |
1075 | break; |
1076 | |
1077 | case IF_OPER_DORMANT: |
1078 | if (netif_oper_up(dev)) |
1079 | operstate = IF_OPER_DORMANT; |
1080 | break; |
1081 | } |
1082 | |
1083 | netif_set_operstate(dev, operstate); |
1084 | } |
1085 | |
1086 | static unsigned int rtnl_dev_get_flags(const struct net_device *dev) |
1087 | { |
1088 | return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) | |
1089 | (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); |
1090 | } |
1091 | |
1092 | static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, |
1093 | const struct ifinfomsg *ifm) |
1094 | { |
1095 | unsigned int flags = ifm->ifi_flags; |
1096 | |
1097 | /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ |
1098 | if (ifm->ifi_change) |
1099 | flags = (flags & ifm->ifi_change) | |
1100 | (rtnl_dev_get_flags(dev) & ~ifm->ifi_change); |
1101 | |
1102 | return flags; |
1103 | } |
1104 | |
1105 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, |
1106 | const struct rtnl_link_stats64 *b) |
1107 | { |
1108 | a->rx_packets = b->rx_packets; |
1109 | a->tx_packets = b->tx_packets; |
1110 | a->rx_bytes = b->rx_bytes; |
1111 | a->tx_bytes = b->tx_bytes; |
1112 | a->rx_errors = b->rx_errors; |
1113 | a->tx_errors = b->tx_errors; |
1114 | a->rx_dropped = b->rx_dropped; |
1115 | a->tx_dropped = b->tx_dropped; |
1116 | |
1117 | a->multicast = b->multicast; |
1118 | a->collisions = b->collisions; |
1119 | |
1120 | a->rx_length_errors = b->rx_length_errors; |
1121 | a->rx_over_errors = b->rx_over_errors; |
1122 | a->rx_crc_errors = b->rx_crc_errors; |
1123 | a->rx_frame_errors = b->rx_frame_errors; |
1124 | a->rx_fifo_errors = b->rx_fifo_errors; |
1125 | a->rx_missed_errors = b->rx_missed_errors; |
1126 | |
1127 | a->tx_aborted_errors = b->tx_aborted_errors; |
1128 | a->tx_carrier_errors = b->tx_carrier_errors; |
1129 | a->tx_fifo_errors = b->tx_fifo_errors; |
1130 | a->tx_heartbeat_errors = b->tx_heartbeat_errors; |
1131 | a->tx_window_errors = b->tx_window_errors; |
1132 | |
1133 | a->rx_compressed = b->rx_compressed; |
1134 | a->tx_compressed = b->tx_compressed; |
1135 | |
1136 | a->rx_nohandler = b->rx_nohandler; |
1137 | } |
1138 | |
1139 | /* All VF info */ |
1140 | static inline int rtnl_vfinfo_size(const struct net_device *dev, |
1141 | u32 ext_filter_mask) |
1142 | { |
1143 | if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { |
1144 | int num_vfs = dev_num_vf(dev: dev->dev.parent); |
1145 | size_t size = nla_total_size(payload: 0); |
1146 | size += num_vfs * |
1147 | (nla_total_size(payload: 0) + |
1148 | nla_total_size(payload: sizeof(struct ifla_vf_mac)) + |
1149 | nla_total_size(payload: sizeof(struct ifla_vf_broadcast)) + |
1150 | nla_total_size(payload: sizeof(struct ifla_vf_vlan)) + |
1151 | nla_total_size(payload: 0) + /* nest IFLA_VF_VLAN_LIST */ |
1152 | nla_total_size(MAX_VLAN_LIST_LEN * |
1153 | sizeof(struct ifla_vf_vlan_info)) + |
1154 | nla_total_size(payload: sizeof(struct ifla_vf_spoofchk)) + |
1155 | nla_total_size(payload: sizeof(struct ifla_vf_tx_rate)) + |
1156 | nla_total_size(payload: sizeof(struct ifla_vf_rate)) + |
1157 | nla_total_size(payload: sizeof(struct ifla_vf_link_state)) + |
1158 | nla_total_size(payload: sizeof(struct ifla_vf_rss_query_en)) + |
1159 | nla_total_size(payload: sizeof(struct ifla_vf_trust))); |
1160 | if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) { |
1161 | size += num_vfs * |
1162 | (nla_total_size(payload: 0) + /* nest IFLA_VF_STATS */ |
1163 | /* IFLA_VF_STATS_RX_PACKETS */ |
1164 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1165 | /* IFLA_VF_STATS_TX_PACKETS */ |
1166 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1167 | /* IFLA_VF_STATS_RX_BYTES */ |
1168 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1169 | /* IFLA_VF_STATS_TX_BYTES */ |
1170 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1171 | /* IFLA_VF_STATS_BROADCAST */ |
1172 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1173 | /* IFLA_VF_STATS_MULTICAST */ |
1174 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1175 | /* IFLA_VF_STATS_RX_DROPPED */ |
1176 | nla_total_size_64bit(payload: sizeof(__u64)) + |
1177 | /* IFLA_VF_STATS_TX_DROPPED */ |
1178 | nla_total_size_64bit(payload: sizeof(__u64))); |
1179 | } |
1180 | if (dev->netdev_ops->ndo_get_vf_guid) |
1181 | size += num_vfs * 2 * |
1182 | nla_total_size(payload: sizeof(struct ifla_vf_guid)); |
1183 | return size; |
1184 | } else |
1185 | return 0; |
1186 | } |
1187 | |
1188 | static size_t rtnl_port_size(const struct net_device *dev, |
1189 | u32 ext_filter_mask) |
1190 | { |
1191 | size_t port_size = nla_total_size(payload: 4) /* PORT_VF */ |
1192 | + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ |
1193 | + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ |
1194 | + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ |
1195 | + nla_total_size(payload: 1) /* PROT_VDP_REQUEST */ |
1196 | + nla_total_size(payload: 2); /* PORT_VDP_RESPONSE */ |
1197 | size_t vf_ports_size = nla_total_size(payload: sizeof(struct nlattr)); |
1198 | size_t vf_port_size = nla_total_size(payload: sizeof(struct nlattr)) |
1199 | + port_size; |
1200 | size_t port_self_size = nla_total_size(payload: sizeof(struct nlattr)) |
1201 | + port_size; |
1202 | |
1203 | if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || |
1204 | !(ext_filter_mask & RTEXT_FILTER_VF)) |
1205 | return 0; |
1206 | if (dev_num_vf(dev: dev->dev.parent)) |
1207 | return port_self_size + vf_ports_size + |
1208 | vf_port_size * dev_num_vf(dev: dev->dev.parent); |
1209 | else |
1210 | return port_self_size; |
1211 | } |
1212 | |
1213 | static size_t rtnl_xdp_size(void) |
1214 | { |
1215 | size_t xdp_size = nla_total_size(payload: 0) + /* nest IFLA_XDP */ |
1216 | nla_total_size(payload: 1) + /* XDP_ATTACHED */ |
1217 | nla_total_size(payload: 4) + /* XDP_PROG_ID (or 1st mode) */ |
1218 | nla_total_size(payload: 4); /* XDP_<mode>_PROG_ID */ |
1219 | |
1220 | return xdp_size; |
1221 | } |
1222 | |
1223 | static size_t rtnl_prop_list_size(const struct net_device *dev) |
1224 | { |
1225 | struct netdev_name_node *name_node; |
1226 | unsigned int cnt = 0; |
1227 | |
1228 | rcu_read_lock(); |
1229 | list_for_each_entry_rcu(name_node, &dev->name_node->list, list) |
1230 | cnt++; |
1231 | rcu_read_unlock(); |
1232 | |
1233 | if (!cnt) |
1234 | return 0; |
1235 | |
1236 | return nla_total_size(payload: 0) + cnt * nla_total_size(ALTIFNAMSIZ); |
1237 | } |
1238 | |
1239 | static size_t rtnl_proto_down_size(const struct net_device *dev) |
1240 | { |
1241 | size_t size = nla_total_size(payload: 1); |
1242 | |
1243 | /* Assume dev->proto_down_reason is not zero. */ |
1244 | size += nla_total_size(payload: 0) + nla_total_size(payload: 4); |
1245 | |
1246 | return size; |
1247 | } |
1248 | |
1249 | static size_t rtnl_devlink_port_size(const struct net_device *dev) |
1250 | { |
1251 | size_t size = nla_total_size(payload: 0); /* nest IFLA_DEVLINK_PORT */ |
1252 | |
1253 | if (dev->devlink_port) |
1254 | size += devlink_nl_port_handle_size(devlink_port: dev->devlink_port); |
1255 | |
1256 | return size; |
1257 | } |
1258 | |
1259 | static size_t rtnl_dpll_pin_size(const struct net_device *dev) |
1260 | { |
1261 | size_t size = nla_total_size(payload: 0); /* nest IFLA_DPLL_PIN */ |
1262 | |
1263 | size += dpll_netdev_pin_handle_size(dev); |
1264 | |
1265 | return size; |
1266 | } |
1267 | |
1268 | static noinline size_t if_nlmsg_size(const struct net_device *dev, |
1269 | u32 ext_filter_mask) |
1270 | { |
1271 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
1272 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ |
1273 | + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ |
1274 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ |
1275 | + nla_total_size_64bit(payload: sizeof(struct rtnl_link_ifmap)) |
1276 | + nla_total_size(payload: sizeof(struct rtnl_link_stats)) |
1277 | + nla_total_size_64bit(payload: sizeof(struct rtnl_link_stats64)) |
1278 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
1279 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ |
1280 | + nla_total_size(payload: 4) /* IFLA_TXQLEN */ |
1281 | + nla_total_size(payload: 4) /* IFLA_WEIGHT */ |
1282 | + nla_total_size(payload: 4) /* IFLA_MTU */ |
1283 | + nla_total_size(payload: 4) /* IFLA_LINK */ |
1284 | + nla_total_size(payload: 4) /* IFLA_MASTER */ |
1285 | + nla_total_size(payload: 1) /* IFLA_CARRIER */ |
1286 | + nla_total_size(payload: 4) /* IFLA_PROMISCUITY */ |
1287 | + nla_total_size(payload: 4) /* IFLA_ALLMULTI */ |
1288 | + nla_total_size(payload: 4) /* IFLA_NUM_TX_QUEUES */ |
1289 | + nla_total_size(payload: 4) /* IFLA_NUM_RX_QUEUES */ |
1290 | + nla_total_size(payload: 4) /* IFLA_GSO_MAX_SEGS */ |
1291 | + nla_total_size(payload: 4) /* IFLA_GSO_MAX_SIZE */ |
1292 | + nla_total_size(payload: 4) /* IFLA_GRO_MAX_SIZE */ |
1293 | + nla_total_size(payload: 4) /* IFLA_GSO_IPV4_MAX_SIZE */ |
1294 | + nla_total_size(payload: 4) /* IFLA_GRO_IPV4_MAX_SIZE */ |
1295 | + nla_total_size(payload: 4) /* IFLA_TSO_MAX_SIZE */ |
1296 | + nla_total_size(payload: 4) /* IFLA_TSO_MAX_SEGS */ |
1297 | + nla_total_size(payload: 1) /* IFLA_OPERSTATE */ |
1298 | + nla_total_size(payload: 1) /* IFLA_LINKMODE */ |
1299 | + nla_total_size(payload: 1) /* IFLA_NETNS_IMMUTABLE */ |
1300 | + nla_total_size(payload: 4) /* IFLA_CARRIER_CHANGES */ |
1301 | + nla_total_size(payload: 4) /* IFLA_LINK_NETNSID */ |
1302 | + nla_total_size(payload: 4) /* IFLA_GROUP */ |
1303 | + nla_total_size(payload: ext_filter_mask |
1304 | & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ |
1305 | + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ |
1306 | + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ |
1307 | + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ |
1308 | + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ |
1309 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ |
1310 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ |
1311 | + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ |
1312 | + rtnl_xdp_size() /* IFLA_XDP */ |
1313 | + nla_total_size(payload: 4) /* IFLA_EVENT */ |
1314 | + nla_total_size(payload: 4) /* IFLA_NEW_NETNSID */ |
1315 | + nla_total_size(payload: 4) /* IFLA_NEW_IFINDEX */ |
1316 | + rtnl_proto_down_size(dev) /* proto down */ |
1317 | + nla_total_size(payload: 4) /* IFLA_TARGET_NETNSID */ |
1318 | + nla_total_size(payload: 4) /* IFLA_CARRIER_UP_COUNT */ |
1319 | + nla_total_size(payload: 4) /* IFLA_CARRIER_DOWN_COUNT */ |
1320 | + nla_total_size(payload: 4) /* IFLA_MIN_MTU */ |
1321 | + nla_total_size(payload: 4) /* IFLA_MAX_MTU */ |
1322 | + rtnl_prop_list_size(dev) |
1323 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */ |
1324 | + rtnl_devlink_port_size(dev) |
1325 | + rtnl_dpll_pin_size(dev) |
1326 | + nla_total_size(payload: 8) /* IFLA_MAX_PACING_OFFLOAD_HORIZON */ |
1327 | + 0; |
1328 | } |
1329 | |
1330 | static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) |
1331 | { |
1332 | struct nlattr *vf_ports; |
1333 | struct nlattr *vf_port; |
1334 | int vf; |
1335 | int err; |
1336 | |
1337 | vf_ports = nla_nest_start_noflag(skb, attrtype: IFLA_VF_PORTS); |
1338 | if (!vf_ports) |
1339 | return -EMSGSIZE; |
1340 | |
1341 | for (vf = 0; vf < dev_num_vf(dev: dev->dev.parent); vf++) { |
1342 | vf_port = nla_nest_start_noflag(skb, attrtype: IFLA_VF_PORT); |
1343 | if (!vf_port) |
1344 | goto nla_put_failure; |
1345 | if (nla_put_u32(skb, attrtype: IFLA_PORT_VF, value: vf)) |
1346 | goto nla_put_failure; |
1347 | err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); |
1348 | if (err == -EMSGSIZE) |
1349 | goto nla_put_failure; |
1350 | if (err) { |
1351 | nla_nest_cancel(skb, start: vf_port); |
1352 | continue; |
1353 | } |
1354 | nla_nest_end(skb, start: vf_port); |
1355 | } |
1356 | |
1357 | nla_nest_end(skb, start: vf_ports); |
1358 | |
1359 | return 0; |
1360 | |
1361 | nla_put_failure: |
1362 | nla_nest_cancel(skb, start: vf_ports); |
1363 | return -EMSGSIZE; |
1364 | } |
1365 | |
1366 | static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) |
1367 | { |
1368 | struct nlattr *port_self; |
1369 | int err; |
1370 | |
1371 | port_self = nla_nest_start_noflag(skb, attrtype: IFLA_PORT_SELF); |
1372 | if (!port_self) |
1373 | return -EMSGSIZE; |
1374 | |
1375 | err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); |
1376 | if (err) { |
1377 | nla_nest_cancel(skb, start: port_self); |
1378 | return (err == -EMSGSIZE) ? err : 0; |
1379 | } |
1380 | |
1381 | nla_nest_end(skb, start: port_self); |
1382 | |
1383 | return 0; |
1384 | } |
1385 | |
1386 | static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, |
1387 | u32 ext_filter_mask) |
1388 | { |
1389 | int err; |
1390 | |
1391 | if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || |
1392 | !(ext_filter_mask & RTEXT_FILTER_VF)) |
1393 | return 0; |
1394 | |
1395 | err = rtnl_port_self_fill(skb, dev); |
1396 | if (err) |
1397 | return err; |
1398 | |
1399 | if (dev_num_vf(dev: dev->dev.parent)) { |
1400 | err = rtnl_vf_ports_fill(skb, dev); |
1401 | if (err) |
1402 | return err; |
1403 | } |
1404 | |
1405 | return 0; |
1406 | } |
1407 | |
1408 | static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) |
1409 | { |
1410 | int err; |
1411 | struct netdev_phys_item_id ppid; |
1412 | |
1413 | err = dev_get_phys_port_id(dev, ppid: &ppid); |
1414 | if (err) { |
1415 | if (err == -EOPNOTSUPP) |
1416 | return 0; |
1417 | return err; |
1418 | } |
1419 | |
1420 | if (nla_put(skb, attrtype: IFLA_PHYS_PORT_ID, attrlen: ppid.id_len, data: ppid.id)) |
1421 | return -EMSGSIZE; |
1422 | |
1423 | return 0; |
1424 | } |
1425 | |
1426 | static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) |
1427 | { |
1428 | char name[IFNAMSIZ]; |
1429 | int err; |
1430 | |
1431 | err = dev_get_phys_port_name(dev, name, len: sizeof(name)); |
1432 | if (err) { |
1433 | if (err == -EOPNOTSUPP) |
1434 | return 0; |
1435 | return err; |
1436 | } |
1437 | |
1438 | if (nla_put_string(skb, attrtype: IFLA_PHYS_PORT_NAME, str: name)) |
1439 | return -EMSGSIZE; |
1440 | |
1441 | return 0; |
1442 | } |
1443 | |
1444 | static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) |
1445 | { |
1446 | struct netdev_phys_item_id ppid = { }; |
1447 | int err; |
1448 | |
1449 | err = dev_get_port_parent_id(dev, ppid: &ppid, recurse: false); |
1450 | if (err) { |
1451 | if (err == -EOPNOTSUPP) |
1452 | return 0; |
1453 | return err; |
1454 | } |
1455 | |
1456 | if (nla_put(skb, attrtype: IFLA_PHYS_SWITCH_ID, attrlen: ppid.id_len, data: ppid.id)) |
1457 | return -EMSGSIZE; |
1458 | |
1459 | return 0; |
1460 | } |
1461 | |
1462 | static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, |
1463 | struct net_device *dev) |
1464 | { |
1465 | struct rtnl_link_stats64 *sp; |
1466 | struct nlattr *attr; |
1467 | |
1468 | attr = nla_reserve_64bit(skb, attrtype: IFLA_STATS64, |
1469 | attrlen: sizeof(struct rtnl_link_stats64), padattr: IFLA_PAD); |
1470 | if (!attr) |
1471 | return -EMSGSIZE; |
1472 | |
1473 | sp = nla_data(nla: attr); |
1474 | dev_get_stats(dev, storage: sp); |
1475 | |
1476 | attr = nla_reserve(skb, attrtype: IFLA_STATS, |
1477 | attrlen: sizeof(struct rtnl_link_stats)); |
1478 | if (!attr) |
1479 | return -EMSGSIZE; |
1480 | |
1481 | copy_rtnl_link_stats(a: nla_data(nla: attr), b: sp); |
1482 | |
1483 | return 0; |
1484 | } |
1485 | |
1486 | static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, |
1487 | struct net_device *dev, |
1488 | int vfs_num, |
1489 | u32 ext_filter_mask) |
1490 | { |
1491 | struct ifla_vf_rss_query_en ; |
1492 | struct nlattr *vf, *vfstats, *vfvlanlist; |
1493 | struct ifla_vf_link_state vf_linkstate; |
1494 | struct ifla_vf_vlan_info vf_vlan_info; |
1495 | struct ifla_vf_spoofchk vf_spoofchk; |
1496 | struct ifla_vf_tx_rate vf_tx_rate; |
1497 | struct ifla_vf_stats vf_stats; |
1498 | struct ifla_vf_trust vf_trust; |
1499 | struct ifla_vf_vlan vf_vlan; |
1500 | struct ifla_vf_rate vf_rate; |
1501 | struct ifla_vf_mac vf_mac; |
1502 | struct ifla_vf_broadcast vf_broadcast; |
1503 | struct ifla_vf_info ivi; |
1504 | struct ifla_vf_guid node_guid; |
1505 | struct ifla_vf_guid port_guid; |
1506 | |
1507 | memset(&ivi, 0, sizeof(ivi)); |
1508 | |
1509 | /* Not all SR-IOV capable drivers support the |
1510 | * spoofcheck and "RSS query enable" query. Preset to |
1511 | * -1 so the user space tool can detect that the driver |
1512 | * didn't report anything. |
1513 | */ |
1514 | ivi.spoofchk = -1; |
1515 | ivi.rss_query_en = -1; |
1516 | ivi.trusted = -1; |
1517 | /* The default value for VF link state is "auto" |
1518 | * IFLA_VF_LINK_STATE_AUTO which equals zero |
1519 | */ |
1520 | ivi.linkstate = 0; |
1521 | /* VLAN Protocol by default is 802.1Q */ |
1522 | ivi.vlan_proto = htons(ETH_P_8021Q); |
1523 | if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi)) |
1524 | return 0; |
1525 | |
1526 | memset(&vf_vlan_info, 0, sizeof(vf_vlan_info)); |
1527 | memset(&node_guid, 0, sizeof(node_guid)); |
1528 | memset(&port_guid, 0, sizeof(port_guid)); |
1529 | |
1530 | vf_mac.vf = |
1531 | vf_vlan.vf = |
1532 | vf_vlan_info.vf = |
1533 | vf_rate.vf = |
1534 | vf_tx_rate.vf = |
1535 | vf_spoofchk.vf = |
1536 | vf_linkstate.vf = |
1537 | vf_rss_query_en.vf = |
1538 | vf_trust.vf = |
1539 | node_guid.vf = |
1540 | port_guid.vf = ivi.vf; |
1541 | |
1542 | memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); |
1543 | memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len); |
1544 | vf_vlan.vlan = ivi.vlan; |
1545 | vf_vlan.qos = ivi.qos; |
1546 | vf_vlan_info.vlan = ivi.vlan; |
1547 | vf_vlan_info.qos = ivi.qos; |
1548 | vf_vlan_info.vlan_proto = ivi.vlan_proto; |
1549 | vf_tx_rate.rate = ivi.max_tx_rate; |
1550 | vf_rate.min_tx_rate = ivi.min_tx_rate; |
1551 | vf_rate.max_tx_rate = ivi.max_tx_rate; |
1552 | vf_spoofchk.setting = ivi.spoofchk; |
1553 | vf_linkstate.link_state = ivi.linkstate; |
1554 | vf_rss_query_en.setting = ivi.rss_query_en; |
1555 | vf_trust.setting = ivi.trusted; |
1556 | vf = nla_nest_start_noflag(skb, attrtype: IFLA_VF_INFO); |
1557 | if (!vf) |
1558 | return -EMSGSIZE; |
1559 | if (nla_put(skb, attrtype: IFLA_VF_MAC, attrlen: sizeof(vf_mac), data: &vf_mac) || |
1560 | nla_put(skb, attrtype: IFLA_VF_BROADCAST, attrlen: sizeof(vf_broadcast), data: &vf_broadcast) || |
1561 | nla_put(skb, attrtype: IFLA_VF_VLAN, attrlen: sizeof(vf_vlan), data: &vf_vlan) || |
1562 | nla_put(skb, attrtype: IFLA_VF_RATE, attrlen: sizeof(vf_rate), |
1563 | data: &vf_rate) || |
1564 | nla_put(skb, attrtype: IFLA_VF_TX_RATE, attrlen: sizeof(vf_tx_rate), |
1565 | data: &vf_tx_rate) || |
1566 | nla_put(skb, attrtype: IFLA_VF_SPOOFCHK, attrlen: sizeof(vf_spoofchk), |
1567 | data: &vf_spoofchk) || |
1568 | nla_put(skb, attrtype: IFLA_VF_LINK_STATE, attrlen: sizeof(vf_linkstate), |
1569 | data: &vf_linkstate) || |
1570 | nla_put(skb, attrtype: IFLA_VF_RSS_QUERY_EN, |
1571 | attrlen: sizeof(vf_rss_query_en), |
1572 | data: &vf_rss_query_en) || |
1573 | nla_put(skb, attrtype: IFLA_VF_TRUST, |
1574 | attrlen: sizeof(vf_trust), data: &vf_trust)) |
1575 | goto nla_put_vf_failure; |
1576 | |
1577 | if (dev->netdev_ops->ndo_get_vf_guid && |
1578 | !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid, |
1579 | &port_guid)) { |
1580 | if (nla_put(skb, attrtype: IFLA_VF_IB_NODE_GUID, attrlen: sizeof(node_guid), |
1581 | data: &node_guid) || |
1582 | nla_put(skb, attrtype: IFLA_VF_IB_PORT_GUID, attrlen: sizeof(port_guid), |
1583 | data: &port_guid)) |
1584 | goto nla_put_vf_failure; |
1585 | } |
1586 | vfvlanlist = nla_nest_start_noflag(skb, attrtype: IFLA_VF_VLAN_LIST); |
1587 | if (!vfvlanlist) |
1588 | goto nla_put_vf_failure; |
1589 | if (nla_put(skb, attrtype: IFLA_VF_VLAN_INFO, attrlen: sizeof(vf_vlan_info), |
1590 | data: &vf_vlan_info)) { |
1591 | nla_nest_cancel(skb, start: vfvlanlist); |
1592 | goto nla_put_vf_failure; |
1593 | } |
1594 | nla_nest_end(skb, start: vfvlanlist); |
1595 | if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) { |
1596 | memset(&vf_stats, 0, sizeof(vf_stats)); |
1597 | if (dev->netdev_ops->ndo_get_vf_stats) |
1598 | dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, |
1599 | &vf_stats); |
1600 | vfstats = nla_nest_start_noflag(skb, attrtype: IFLA_VF_STATS); |
1601 | if (!vfstats) |
1602 | goto nla_put_vf_failure; |
1603 | if (nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_RX_PACKETS, |
1604 | value: vf_stats.rx_packets, padattr: IFLA_VF_STATS_PAD) || |
1605 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_TX_PACKETS, |
1606 | value: vf_stats.tx_packets, padattr: IFLA_VF_STATS_PAD) || |
1607 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_RX_BYTES, |
1608 | value: vf_stats.rx_bytes, padattr: IFLA_VF_STATS_PAD) || |
1609 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_TX_BYTES, |
1610 | value: vf_stats.tx_bytes, padattr: IFLA_VF_STATS_PAD) || |
1611 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_BROADCAST, |
1612 | value: vf_stats.broadcast, padattr: IFLA_VF_STATS_PAD) || |
1613 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_MULTICAST, |
1614 | value: vf_stats.multicast, padattr: IFLA_VF_STATS_PAD) || |
1615 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_RX_DROPPED, |
1616 | value: vf_stats.rx_dropped, padattr: IFLA_VF_STATS_PAD) || |
1617 | nla_put_u64_64bit(skb, attrtype: IFLA_VF_STATS_TX_DROPPED, |
1618 | value: vf_stats.tx_dropped, padattr: IFLA_VF_STATS_PAD)) { |
1619 | nla_nest_cancel(skb, start: vfstats); |
1620 | goto nla_put_vf_failure; |
1621 | } |
1622 | nla_nest_end(skb, start: vfstats); |
1623 | } |
1624 | nla_nest_end(skb, start: vf); |
1625 | return 0; |
1626 | |
1627 | nla_put_vf_failure: |
1628 | nla_nest_cancel(skb, start: vf); |
1629 | return -EMSGSIZE; |
1630 | } |
1631 | |
1632 | static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb, |
1633 | struct net_device *dev, |
1634 | u32 ext_filter_mask) |
1635 | { |
1636 | struct nlattr *vfinfo; |
1637 | int i, num_vfs; |
1638 | |
1639 | if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0)) |
1640 | return 0; |
1641 | |
1642 | num_vfs = dev_num_vf(dev: dev->dev.parent); |
1643 | if (nla_put_u32(skb, attrtype: IFLA_NUM_VF, value: num_vfs)) |
1644 | return -EMSGSIZE; |
1645 | |
1646 | if (!dev->netdev_ops->ndo_get_vf_config) |
1647 | return 0; |
1648 | |
1649 | vfinfo = nla_nest_start_noflag(skb, attrtype: IFLA_VFINFO_LIST); |
1650 | if (!vfinfo) |
1651 | return -EMSGSIZE; |
1652 | |
1653 | for (i = 0; i < num_vfs; i++) { |
1654 | if (rtnl_fill_vfinfo(skb, dev, vfs_num: i, ext_filter_mask)) { |
1655 | nla_nest_cancel(skb, start: vfinfo); |
1656 | return -EMSGSIZE; |
1657 | } |
1658 | } |
1659 | |
1660 | nla_nest_end(skb, start: vfinfo); |
1661 | return 0; |
1662 | } |
1663 | |
1664 | static int rtnl_fill_link_ifmap(struct sk_buff *skb, |
1665 | const struct net_device *dev) |
1666 | { |
1667 | struct rtnl_link_ifmap map; |
1668 | |
1669 | memset(&map, 0, sizeof(map)); |
1670 | map.mem_start = READ_ONCE(dev->mem_start); |
1671 | map.mem_end = READ_ONCE(dev->mem_end); |
1672 | map.base_addr = READ_ONCE(dev->base_addr); |
1673 | map.irq = READ_ONCE(dev->irq); |
1674 | map.dma = READ_ONCE(dev->dma); |
1675 | map.port = READ_ONCE(dev->if_port); |
1676 | |
1677 | if (nla_put_64bit(skb, IFLA_MAP, attrlen: sizeof(map), data: &map, padattr: IFLA_PAD)) |
1678 | return -EMSGSIZE; |
1679 | |
1680 | return 0; |
1681 | } |
1682 | |
1683 | static u32 rtnl_xdp_prog_skb(struct net_device *dev) |
1684 | { |
1685 | const struct bpf_prog *generic_xdp_prog; |
1686 | u32 res = 0; |
1687 | |
1688 | rcu_read_lock(); |
1689 | generic_xdp_prog = rcu_dereference(dev->xdp_prog); |
1690 | if (generic_xdp_prog) |
1691 | res = generic_xdp_prog->aux->id; |
1692 | rcu_read_unlock(); |
1693 | |
1694 | return res; |
1695 | } |
1696 | |
1697 | static u32 rtnl_xdp_prog_drv(struct net_device *dev) |
1698 | { |
1699 | return dev_xdp_prog_id(dev, mode: XDP_MODE_DRV); |
1700 | } |
1701 | |
1702 | static u32 rtnl_xdp_prog_hw(struct net_device *dev) |
1703 | { |
1704 | return dev_xdp_prog_id(dev, mode: XDP_MODE_HW); |
1705 | } |
1706 | |
1707 | static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, |
1708 | u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr, |
1709 | u32 (*get_prog_id)(struct net_device *dev)) |
1710 | { |
1711 | u32 curr_id; |
1712 | int err; |
1713 | |
1714 | curr_id = get_prog_id(dev); |
1715 | if (!curr_id) |
1716 | return 0; |
1717 | |
1718 | *prog_id = curr_id; |
1719 | err = nla_put_u32(skb, attrtype: attr, value: curr_id); |
1720 | if (err) |
1721 | return err; |
1722 | |
1723 | if (*mode != XDP_ATTACHED_NONE) |
1724 | *mode = XDP_ATTACHED_MULTI; |
1725 | else |
1726 | *mode = tgt_mode; |
1727 | |
1728 | return 0; |
1729 | } |
1730 | |
1731 | static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) |
1732 | { |
1733 | struct nlattr *xdp; |
1734 | u32 prog_id; |
1735 | int err; |
1736 | u8 mode; |
1737 | |
1738 | xdp = nla_nest_start_noflag(skb, attrtype: IFLA_XDP); |
1739 | if (!xdp) |
1740 | return -EMSGSIZE; |
1741 | |
1742 | prog_id = 0; |
1743 | mode = XDP_ATTACHED_NONE; |
1744 | err = rtnl_xdp_report_one(skb, dev, prog_id: &prog_id, mode: &mode, tgt_mode: XDP_ATTACHED_SKB, |
1745 | attr: IFLA_XDP_SKB_PROG_ID, get_prog_id: rtnl_xdp_prog_skb); |
1746 | if (err) |
1747 | goto err_cancel; |
1748 | err = rtnl_xdp_report_one(skb, dev, prog_id: &prog_id, mode: &mode, tgt_mode: XDP_ATTACHED_DRV, |
1749 | attr: IFLA_XDP_DRV_PROG_ID, get_prog_id: rtnl_xdp_prog_drv); |
1750 | if (err) |
1751 | goto err_cancel; |
1752 | err = rtnl_xdp_report_one(skb, dev, prog_id: &prog_id, mode: &mode, tgt_mode: XDP_ATTACHED_HW, |
1753 | attr: IFLA_XDP_HW_PROG_ID, get_prog_id: rtnl_xdp_prog_hw); |
1754 | if (err) |
1755 | goto err_cancel; |
1756 | |
1757 | err = nla_put_u8(skb, attrtype: IFLA_XDP_ATTACHED, value: mode); |
1758 | if (err) |
1759 | goto err_cancel; |
1760 | |
1761 | if (prog_id && mode != XDP_ATTACHED_MULTI) { |
1762 | err = nla_put_u32(skb, attrtype: IFLA_XDP_PROG_ID, value: prog_id); |
1763 | if (err) |
1764 | goto err_cancel; |
1765 | } |
1766 | |
1767 | nla_nest_end(skb, start: xdp); |
1768 | return 0; |
1769 | |
1770 | err_cancel: |
1771 | nla_nest_cancel(skb, start: xdp); |
1772 | return err; |
1773 | } |
1774 | |
1775 | static u32 rtnl_get_event(unsigned long event) |
1776 | { |
1777 | u32 rtnl_event_type = IFLA_EVENT_NONE; |
1778 | |
1779 | switch (event) { |
1780 | case NETDEV_REBOOT: |
1781 | rtnl_event_type = IFLA_EVENT_REBOOT; |
1782 | break; |
1783 | case NETDEV_FEAT_CHANGE: |
1784 | rtnl_event_type = IFLA_EVENT_FEATURES; |
1785 | break; |
1786 | case NETDEV_BONDING_FAILOVER: |
1787 | rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER; |
1788 | break; |
1789 | case NETDEV_NOTIFY_PEERS: |
1790 | rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS; |
1791 | break; |
1792 | case NETDEV_RESEND_IGMP: |
1793 | rtnl_event_type = IFLA_EVENT_IGMP_RESEND; |
1794 | break; |
1795 | case NETDEV_CHANGEINFODATA: |
1796 | rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS; |
1797 | break; |
1798 | default: |
1799 | break; |
1800 | } |
1801 | |
1802 | return rtnl_event_type; |
1803 | } |
1804 | |
1805 | static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev) |
1806 | { |
1807 | const struct net_device *upper_dev; |
1808 | int ret = 0; |
1809 | |
1810 | rcu_read_lock(); |
1811 | |
1812 | upper_dev = netdev_master_upper_dev_get_rcu(dev); |
1813 | if (upper_dev) |
1814 | ret = nla_put_u32(skb, IFLA_MASTER, |
1815 | READ_ONCE(upper_dev->ifindex)); |
1816 | |
1817 | rcu_read_unlock(); |
1818 | return ret; |
1819 | } |
1820 | |
1821 | static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev, |
1822 | bool force) |
1823 | { |
1824 | int iflink = dev_get_iflink(dev); |
1825 | |
1826 | if (force || READ_ONCE(dev->ifindex) != iflink) |
1827 | return nla_put_u32(skb, attrtype: IFLA_LINK, value: iflink); |
1828 | |
1829 | return 0; |
1830 | } |
1831 | |
1832 | static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb, |
1833 | struct net_device *dev) |
1834 | { |
1835 | char buf[IFALIASZ]; |
1836 | int ret; |
1837 | |
1838 | ret = dev_get_alias(dev, buf, sizeof(buf)); |
1839 | return ret > 0 ? nla_put_string(skb, attrtype: IFLA_IFALIAS, str: buf) : 0; |
1840 | } |
1841 | |
1842 | static int rtnl_fill_link_netnsid(struct sk_buff *skb, |
1843 | const struct net_device *dev, |
1844 | struct net *src_net, gfp_t gfp) |
1845 | { |
1846 | bool put_iflink = false; |
1847 | |
1848 | if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { |
1849 | struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); |
1850 | |
1851 | if (!net_eq(net1: dev_net(dev), net2: link_net)) { |
1852 | int id = peernet2id_alloc(net: src_net, peer: link_net, gfp); |
1853 | |
1854 | if (nla_put_s32(skb, attrtype: IFLA_LINK_NETNSID, value: id)) |
1855 | return -EMSGSIZE; |
1856 | |
1857 | put_iflink = true; |
1858 | } |
1859 | } |
1860 | |
1861 | return nla_put_iflink(skb, dev, force: put_iflink); |
1862 | } |
1863 | |
1864 | static int rtnl_fill_link_af(struct sk_buff *skb, |
1865 | const struct net_device *dev, |
1866 | u32 ext_filter_mask) |
1867 | { |
1868 | const struct rtnl_af_ops *af_ops; |
1869 | struct nlattr *af_spec; |
1870 | |
1871 | af_spec = nla_nest_start_noflag(skb, attrtype: IFLA_AF_SPEC); |
1872 | if (!af_spec) |
1873 | return -EMSGSIZE; |
1874 | |
1875 | list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { |
1876 | struct nlattr *af; |
1877 | int err; |
1878 | |
1879 | if (!af_ops->fill_link_af) |
1880 | continue; |
1881 | |
1882 | af = nla_nest_start_noflag(skb, attrtype: af_ops->family); |
1883 | if (!af) |
1884 | return -EMSGSIZE; |
1885 | |
1886 | err = af_ops->fill_link_af(skb, dev, ext_filter_mask); |
1887 | /* |
1888 | * Caller may return ENODATA to indicate that there |
1889 | * was no data to be dumped. This is not an error, it |
1890 | * means we should trim the attribute header and |
1891 | * continue. |
1892 | */ |
1893 | if (err == -ENODATA) |
1894 | nla_nest_cancel(skb, start: af); |
1895 | else if (err < 0) |
1896 | return -EMSGSIZE; |
1897 | |
1898 | nla_nest_end(skb, start: af); |
1899 | } |
1900 | |
1901 | nla_nest_end(skb, start: af_spec); |
1902 | return 0; |
1903 | } |
1904 | |
1905 | static int rtnl_fill_alt_ifnames(struct sk_buff *skb, |
1906 | const struct net_device *dev) |
1907 | { |
1908 | struct netdev_name_node *name_node; |
1909 | int count = 0; |
1910 | |
1911 | list_for_each_entry_rcu(name_node, &dev->name_node->list, list) { |
1912 | if (nla_put_string(skb, attrtype: IFLA_ALT_IFNAME, str: name_node->name)) |
1913 | return -EMSGSIZE; |
1914 | count++; |
1915 | } |
1916 | return count; |
1917 | } |
1918 | |
1919 | /* RCU protected. */ |
1920 | static int rtnl_fill_prop_list(struct sk_buff *skb, |
1921 | const struct net_device *dev) |
1922 | { |
1923 | struct nlattr *prop_list; |
1924 | int ret; |
1925 | |
1926 | prop_list = nla_nest_start(skb, attrtype: IFLA_PROP_LIST); |
1927 | if (!prop_list) |
1928 | return -EMSGSIZE; |
1929 | |
1930 | ret = rtnl_fill_alt_ifnames(skb, dev); |
1931 | if (ret <= 0) |
1932 | goto nest_cancel; |
1933 | |
1934 | nla_nest_end(skb, start: prop_list); |
1935 | return 0; |
1936 | |
1937 | nest_cancel: |
1938 | nla_nest_cancel(skb, start: prop_list); |
1939 | return ret; |
1940 | } |
1941 | |
1942 | static int rtnl_fill_proto_down(struct sk_buff *skb, |
1943 | const struct net_device *dev) |
1944 | { |
1945 | struct nlattr *pr; |
1946 | u32 preason; |
1947 | |
1948 | if (nla_put_u8(skb, attrtype: IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down))) |
1949 | goto nla_put_failure; |
1950 | |
1951 | preason = READ_ONCE(dev->proto_down_reason); |
1952 | if (!preason) |
1953 | return 0; |
1954 | |
1955 | pr = nla_nest_start(skb, attrtype: IFLA_PROTO_DOWN_REASON); |
1956 | if (!pr) |
1957 | return -EMSGSIZE; |
1958 | |
1959 | if (nla_put_u32(skb, attrtype: IFLA_PROTO_DOWN_REASON_VALUE, value: preason)) { |
1960 | nla_nest_cancel(skb, start: pr); |
1961 | goto nla_put_failure; |
1962 | } |
1963 | |
1964 | nla_nest_end(skb, start: pr); |
1965 | return 0; |
1966 | |
1967 | nla_put_failure: |
1968 | return -EMSGSIZE; |
1969 | } |
1970 | |
1971 | static int rtnl_fill_devlink_port(struct sk_buff *skb, |
1972 | const struct net_device *dev) |
1973 | { |
1974 | struct nlattr *devlink_port_nest; |
1975 | int ret; |
1976 | |
1977 | devlink_port_nest = nla_nest_start(skb, attrtype: IFLA_DEVLINK_PORT); |
1978 | if (!devlink_port_nest) |
1979 | return -EMSGSIZE; |
1980 | |
1981 | if (dev->devlink_port) { |
1982 | ret = devlink_nl_port_handle_fill(msg: skb, devlink_port: dev->devlink_port); |
1983 | if (ret < 0) |
1984 | goto nest_cancel; |
1985 | } |
1986 | |
1987 | nla_nest_end(skb, start: devlink_port_nest); |
1988 | return 0; |
1989 | |
1990 | nest_cancel: |
1991 | nla_nest_cancel(skb, start: devlink_port_nest); |
1992 | return ret; |
1993 | } |
1994 | |
1995 | static int rtnl_fill_dpll_pin(struct sk_buff *skb, |
1996 | const struct net_device *dev) |
1997 | { |
1998 | struct nlattr *dpll_pin_nest; |
1999 | int ret; |
2000 | |
2001 | dpll_pin_nest = nla_nest_start(skb, attrtype: IFLA_DPLL_PIN); |
2002 | if (!dpll_pin_nest) |
2003 | return -EMSGSIZE; |
2004 | |
2005 | ret = dpll_netdev_add_pin_handle(msg: skb, dev); |
2006 | if (ret < 0) |
2007 | goto nest_cancel; |
2008 | |
2009 | nla_nest_end(skb, start: dpll_pin_nest); |
2010 | return 0; |
2011 | |
2012 | nest_cancel: |
2013 | nla_nest_cancel(skb, start: dpll_pin_nest); |
2014 | return ret; |
2015 | } |
2016 | |
2017 | static int rtnl_fill_ifinfo(struct sk_buff *skb, |
2018 | struct net_device *dev, struct net *src_net, |
2019 | int type, u32 pid, u32 seq, u32 change, |
2020 | unsigned int flags, u32 ext_filter_mask, |
2021 | u32 event, int *new_nsid, int new_ifindex, |
2022 | int tgt_netnsid, gfp_t gfp) |
2023 | { |
2024 | char devname[IFNAMSIZ]; |
2025 | struct ifinfomsg *ifm; |
2026 | struct nlmsghdr *nlh; |
2027 | struct Qdisc *qdisc; |
2028 | |
2029 | ASSERT_RTNL(); |
2030 | nlh = nlmsg_put(skb, portid: pid, seq, type, payload: sizeof(*ifm), flags); |
2031 | if (nlh == NULL) |
2032 | return -EMSGSIZE; |
2033 | |
2034 | ifm = nlmsg_data(nlh); |
2035 | ifm->ifi_family = AF_UNSPEC; |
2036 | ifm->__ifi_pad = 0; |
2037 | ifm->ifi_type = READ_ONCE(dev->type); |
2038 | ifm->ifi_index = READ_ONCE(dev->ifindex); |
2039 | ifm->ifi_flags = dev_get_flags(dev); |
2040 | ifm->ifi_change = change; |
2041 | |
2042 | if (tgt_netnsid >= 0 && nla_put_s32(skb, attrtype: IFLA_TARGET_NETNSID, value: tgt_netnsid)) |
2043 | goto nla_put_failure; |
2044 | |
2045 | netdev_copy_name(dev, name: devname); |
2046 | if (nla_put_string(skb, attrtype: IFLA_IFNAME, str: devname)) |
2047 | goto nla_put_failure; |
2048 | |
2049 | if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) || |
2050 | nla_put_u8(skb, attrtype: IFLA_OPERSTATE, |
2051 | value: netif_running(dev) ? READ_ONCE(dev->operstate) : |
2052 | IF_OPER_DOWN) || |
2053 | nla_put_u8(skb, attrtype: IFLA_LINKMODE, READ_ONCE(dev->link_mode)) || |
2054 | nla_put_u8(skb, attrtype: IFLA_NETNS_IMMUTABLE, value: dev->netns_immutable) || |
2055 | nla_put_u32(skb, attrtype: IFLA_MTU, READ_ONCE(dev->mtu)) || |
2056 | nla_put_u32(skb, attrtype: IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) || |
2057 | nla_put_u32(skb, attrtype: IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) || |
2058 | nla_put_u32(skb, attrtype: IFLA_GROUP, READ_ONCE(dev->group)) || |
2059 | nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) || |
2060 | nla_put_u32(skb, attrtype: IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) || |
2061 | nla_put_u32(skb, attrtype: IFLA_NUM_TX_QUEUES, |
2062 | READ_ONCE(dev->num_tx_queues)) || |
2063 | nla_put_u32(skb, attrtype: IFLA_GSO_MAX_SEGS, |
2064 | READ_ONCE(dev->gso_max_segs)) || |
2065 | nla_put_u32(skb, attrtype: IFLA_GSO_MAX_SIZE, |
2066 | READ_ONCE(dev->gso_max_size)) || |
2067 | nla_put_u32(skb, attrtype: IFLA_GRO_MAX_SIZE, |
2068 | READ_ONCE(dev->gro_max_size)) || |
2069 | nla_put_u32(skb, attrtype: IFLA_GSO_IPV4_MAX_SIZE, |
2070 | READ_ONCE(dev->gso_ipv4_max_size)) || |
2071 | nla_put_u32(skb, attrtype: IFLA_GRO_IPV4_MAX_SIZE, |
2072 | READ_ONCE(dev->gro_ipv4_max_size)) || |
2073 | nla_put_u32(skb, attrtype: IFLA_TSO_MAX_SIZE, |
2074 | READ_ONCE(dev->tso_max_size)) || |
2075 | nla_put_u32(skb, attrtype: IFLA_TSO_MAX_SEGS, |
2076 | READ_ONCE(dev->tso_max_segs)) || |
2077 | nla_put_uint(skb, attrtype: IFLA_MAX_PACING_OFFLOAD_HORIZON, |
2078 | READ_ONCE(dev->max_pacing_offload_horizon)) || |
2079 | #ifdef CONFIG_RPS |
2080 | nla_put_u32(skb, attrtype: IFLA_NUM_RX_QUEUES, |
2081 | READ_ONCE(dev->num_rx_queues)) || |
2082 | #endif |
2083 | put_master_ifindex(skb, dev) || |
2084 | nla_put_u8(skb, attrtype: IFLA_CARRIER, value: netif_carrier_ok(dev)) || |
2085 | nla_put_ifalias(skb, dev) || |
2086 | nla_put_u32(skb, attrtype: IFLA_CARRIER_CHANGES, |
2087 | value: atomic_read(v: &dev->carrier_up_count) + |
2088 | atomic_read(v: &dev->carrier_down_count)) || |
2089 | nla_put_u32(skb, attrtype: IFLA_CARRIER_UP_COUNT, |
2090 | value: atomic_read(v: &dev->carrier_up_count)) || |
2091 | nla_put_u32(skb, attrtype: IFLA_CARRIER_DOWN_COUNT, |
2092 | value: atomic_read(v: &dev->carrier_down_count))) |
2093 | goto nla_put_failure; |
2094 | |
2095 | if (rtnl_fill_proto_down(skb, dev)) |
2096 | goto nla_put_failure; |
2097 | |
2098 | if (event != IFLA_EVENT_NONE) { |
2099 | if (nla_put_u32(skb, attrtype: IFLA_EVENT, value: event)) |
2100 | goto nla_put_failure; |
2101 | } |
2102 | |
2103 | if (dev->addr_len) { |
2104 | if (nla_put(skb, attrtype: IFLA_ADDRESS, attrlen: dev->addr_len, data: dev->dev_addr) || |
2105 | nla_put(skb, attrtype: IFLA_BROADCAST, attrlen: dev->addr_len, data: dev->broadcast)) |
2106 | goto nla_put_failure; |
2107 | } |
2108 | |
2109 | if (rtnl_phys_port_id_fill(skb, dev)) |
2110 | goto nla_put_failure; |
2111 | |
2112 | if (rtnl_phys_port_name_fill(skb, dev)) |
2113 | goto nla_put_failure; |
2114 | |
2115 | if (rtnl_phys_switch_id_fill(skb, dev)) |
2116 | goto nla_put_failure; |
2117 | |
2118 | if (rtnl_fill_stats(skb, dev)) |
2119 | goto nla_put_failure; |
2120 | |
2121 | if (rtnl_fill_vf(skb, dev, ext_filter_mask)) |
2122 | goto nla_put_failure; |
2123 | |
2124 | if (rtnl_port_fill(skb, dev, ext_filter_mask)) |
2125 | goto nla_put_failure; |
2126 | |
2127 | if (rtnl_xdp_fill(skb, dev)) |
2128 | goto nla_put_failure; |
2129 | |
2130 | if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { |
2131 | if (rtnl_link_fill(skb, dev) < 0) |
2132 | goto nla_put_failure; |
2133 | } |
2134 | |
2135 | if (new_nsid && |
2136 | nla_put_s32(skb, attrtype: IFLA_NEW_NETNSID, value: *new_nsid) < 0) |
2137 | goto nla_put_failure; |
2138 | if (new_ifindex && |
2139 | nla_put_s32(skb, attrtype: IFLA_NEW_IFINDEX, value: new_ifindex) < 0) |
2140 | goto nla_put_failure; |
2141 | |
2142 | if (memchr_inv(p: dev->perm_addr, c: '\0', size: dev->addr_len) && |
2143 | nla_put(skb, attrtype: IFLA_PERM_ADDRESS, attrlen: dev->addr_len, data: dev->perm_addr)) |
2144 | goto nla_put_failure; |
2145 | |
2146 | rcu_read_lock(); |
2147 | if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC)) |
2148 | goto nla_put_failure_rcu; |
2149 | qdisc = rcu_dereference(dev->qdisc); |
2150 | if (qdisc && nla_put_string(skb, attrtype: IFLA_QDISC, str: qdisc->ops->id)) |
2151 | goto nla_put_failure_rcu; |
2152 | if (rtnl_fill_link_af(skb, dev, ext_filter_mask)) |
2153 | goto nla_put_failure_rcu; |
2154 | if (rtnl_fill_link_ifmap(skb, dev)) |
2155 | goto nla_put_failure_rcu; |
2156 | if (rtnl_fill_prop_list(skb, dev)) |
2157 | goto nla_put_failure_rcu; |
2158 | rcu_read_unlock(); |
2159 | |
2160 | if (dev->dev.parent && |
2161 | nla_put_string(skb, attrtype: IFLA_PARENT_DEV_NAME, |
2162 | str: dev_name(dev: dev->dev.parent))) |
2163 | goto nla_put_failure; |
2164 | |
2165 | if (dev->dev.parent && dev->dev.parent->bus && |
2166 | nla_put_string(skb, attrtype: IFLA_PARENT_DEV_BUS_NAME, |
2167 | str: dev->dev.parent->bus->name)) |
2168 | goto nla_put_failure; |
2169 | |
2170 | if (rtnl_fill_devlink_port(skb, dev)) |
2171 | goto nla_put_failure; |
2172 | |
2173 | if (rtnl_fill_dpll_pin(skb, dev)) |
2174 | goto nla_put_failure; |
2175 | |
2176 | nlmsg_end(skb, nlh); |
2177 | return 0; |
2178 | |
2179 | nla_put_failure_rcu: |
2180 | rcu_read_unlock(); |
2181 | nla_put_failure: |
2182 | nlmsg_cancel(skb, nlh); |
2183 | return -EMSGSIZE; |
2184 | } |
2185 | |
2186 | static const struct nla_policy ifla_policy[IFLA_MAX+1] = { |
2187 | [IFLA_UNSPEC] = { .strict_start_type = IFLA_DPLL_PIN }, |
2188 | [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, |
2189 | [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, |
2190 | [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, |
2191 | [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, |
2192 | [IFLA_MTU] = { .type = NLA_U32 }, |
2193 | [IFLA_LINK] = { .type = NLA_U32 }, |
2194 | [IFLA_MASTER] = { .type = NLA_U32 }, |
2195 | [IFLA_CARRIER] = { .type = NLA_U8 }, |
2196 | [IFLA_TXQLEN] = { .type = NLA_U32 }, |
2197 | [IFLA_WEIGHT] = { .type = NLA_U32 }, |
2198 | [IFLA_OPERSTATE] = { .type = NLA_U8 }, |
2199 | [IFLA_LINKMODE] = { .type = NLA_U8 }, |
2200 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
2201 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
2202 | [IFLA_NET_NS_FD] = { .type = NLA_U32 }, |
2203 | /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to |
2204 | * allow 0-length string (needed to remove an alias). |
2205 | */ |
2206 | [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, |
2207 | [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, |
2208 | [IFLA_VF_PORTS] = { .type = NLA_NESTED }, |
2209 | [IFLA_PORT_SELF] = { .type = NLA_NESTED }, |
2210 | [IFLA_AF_SPEC] = { .type = NLA_NESTED }, |
2211 | [IFLA_EXT_MASK] = { .type = NLA_U32 }, |
2212 | [IFLA_PROMISCUITY] = { .type = NLA_U32 }, |
2213 | [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, |
2214 | [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, |
2215 | [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, |
2216 | [IFLA_GSO_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1), |
2217 | [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, |
2218 | [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ |
2219 | [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, |
2220 | [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, |
2221 | [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, |
2222 | [IFLA_XDP] = { .type = NLA_NESTED }, |
2223 | [IFLA_EVENT] = { .type = NLA_U32 }, |
2224 | [IFLA_GROUP] = { .type = NLA_U32 }, |
2225 | [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, |
2226 | [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, |
2227 | [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, |
2228 | [IFLA_MIN_MTU] = { .type = NLA_U32 }, |
2229 | [IFLA_MAX_MTU] = { .type = NLA_U32 }, |
2230 | [IFLA_PROP_LIST] = { .type = NLA_NESTED }, |
2231 | [IFLA_ALT_IFNAME] = { .type = NLA_STRING, |
2232 | .len = ALTIFNAMSIZ - 1 }, |
2233 | [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, |
2234 | [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, |
2235 | [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), |
2236 | [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, |
2237 | [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, |
2238 | [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, |
2239 | [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, |
2240 | [IFLA_ALLMULTI] = { .type = NLA_REJECT }, |
2241 | [IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1), |
2242 | [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, |
2243 | [IFLA_NETNS_IMMUTABLE] = { .type = NLA_REJECT }, |
2244 | }; |
2245 | |
2246 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { |
2247 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, |
2248 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, |
2249 | [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, |
2250 | [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, |
2251 | }; |
2252 | |
2253 | static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { |
2254 | [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, |
2255 | [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, |
2256 | [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, |
2257 | [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, |
2258 | [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, |
2259 | [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, |
2260 | [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, |
2261 | [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, |
2262 | [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, |
2263 | [IFLA_VF_STATS] = { .type = NLA_NESTED }, |
2264 | [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, |
2265 | [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, |
2266 | [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, |
2267 | }; |
2268 | |
2269 | static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { |
2270 | [IFLA_PORT_VF] = { .type = NLA_U32 }, |
2271 | [IFLA_PORT_PROFILE] = { .type = NLA_STRING, |
2272 | .len = PORT_PROFILE_MAX }, |
2273 | [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, |
2274 | .len = PORT_UUID_MAX }, |
2275 | [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, |
2276 | .len = PORT_UUID_MAX }, |
2277 | [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, |
2278 | [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, |
2279 | |
2280 | /* Unused, but we need to keep it here since user space could |
2281 | * fill it. It's also broken with regard to NLA_BINARY use in |
2282 | * combination with structs. |
2283 | */ |
2284 | [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, |
2285 | .len = sizeof(struct ifla_port_vsi) }, |
2286 | }; |
2287 | |
2288 | static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { |
2289 | [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, |
2290 | [IFLA_XDP_FD] = { .type = NLA_S32 }, |
2291 | [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, |
2292 | [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, |
2293 | [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, |
2294 | [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, |
2295 | }; |
2296 | |
2297 | static struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla, |
2298 | int *ops_srcu_index) |
2299 | { |
2300 | struct nlattr *linfo[IFLA_INFO_MAX + 1]; |
2301 | struct rtnl_link_ops *ops = NULL; |
2302 | |
2303 | if (nla_parse_nested_deprecated(tb: linfo, IFLA_INFO_MAX, nla, policy: ifla_info_policy, NULL) < 0) |
2304 | return NULL; |
2305 | |
2306 | if (linfo[IFLA_INFO_KIND]) { |
2307 | char kind[MODULE_NAME_LEN]; |
2308 | |
2309 | nla_strscpy(dst: kind, nla: linfo[IFLA_INFO_KIND], dstsize: sizeof(kind)); |
2310 | ops = rtnl_link_ops_get(kind, srcu_index: ops_srcu_index); |
2311 | } |
2312 | |
2313 | return ops; |
2314 | } |
2315 | |
2316 | static bool link_master_filtered(struct net_device *dev, int master_idx) |
2317 | { |
2318 | struct net_device *master; |
2319 | |
2320 | if (!master_idx) |
2321 | return false; |
2322 | |
2323 | master = netdev_master_upper_dev_get(dev); |
2324 | |
2325 | /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need |
2326 | * another invalid value for ifindex to denote "no master". |
2327 | */ |
2328 | if (master_idx == -1) |
2329 | return !!master; |
2330 | |
2331 | if (!master || master->ifindex != master_idx) |
2332 | return true; |
2333 | |
2334 | return false; |
2335 | } |
2336 | |
2337 | static bool link_kind_filtered(const struct net_device *dev, |
2338 | const struct rtnl_link_ops *kind_ops) |
2339 | { |
2340 | if (kind_ops && dev->rtnl_link_ops != kind_ops) |
2341 | return true; |
2342 | |
2343 | return false; |
2344 | } |
2345 | |
2346 | static bool link_dump_filtered(struct net_device *dev, |
2347 | int master_idx, |
2348 | const struct rtnl_link_ops *kind_ops) |
2349 | { |
2350 | if (link_master_filtered(dev, master_idx) || |
2351 | link_kind_filtered(dev, kind_ops)) |
2352 | return true; |
2353 | |
2354 | return false; |
2355 | } |
2356 | |
2357 | /** |
2358 | * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. |
2359 | * @sk: netlink socket |
2360 | * @netnsid: network namespace identifier |
2361 | * |
2362 | * Returns the network namespace identified by netnsid on success or an error |
2363 | * pointer on failure. |
2364 | */ |
2365 | struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) |
2366 | { |
2367 | struct net *net; |
2368 | |
2369 | net = get_net_ns_by_id(net: sock_net(sk), id: netnsid); |
2370 | if (!net) |
2371 | return ERR_PTR(error: -EINVAL); |
2372 | |
2373 | /* For now, the caller is required to have CAP_NET_ADMIN in |
2374 | * the user namespace owning the target net ns. |
2375 | */ |
2376 | if (!sk_ns_capable(sk, user_ns: net->user_ns, CAP_NET_ADMIN)) { |
2377 | put_net(net); |
2378 | return ERR_PTR(error: -EACCES); |
2379 | } |
2380 | return net; |
2381 | } |
2382 | EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); |
2383 | |
2384 | static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, |
2385 | bool strict_check, struct nlattr **tb, |
2386 | struct netlink_ext_ack *extack) |
2387 | { |
2388 | int hdrlen; |
2389 | |
2390 | if (strict_check) { |
2391 | struct ifinfomsg *ifm; |
2392 | |
2393 | ifm = nlmsg_payload(nlh, len: sizeof(*ifm)); |
2394 | if (!ifm) { |
2395 | NL_SET_ERR_MSG(extack, "Invalid header for link dump" ); |
2396 | return -EINVAL; |
2397 | } |
2398 | |
2399 | if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || |
2400 | ifm->ifi_change) { |
2401 | NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request" ); |
2402 | return -EINVAL; |
2403 | } |
2404 | if (ifm->ifi_index) { |
2405 | NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps" ); |
2406 | return -EINVAL; |
2407 | } |
2408 | |
2409 | return nlmsg_parse_deprecated_strict(nlh, hdrlen: sizeof(*ifm), tb, |
2410 | IFLA_MAX, policy: ifla_policy, |
2411 | extack); |
2412 | } |
2413 | |
2414 | /* A hack to preserve kernel<->userspace interface. |
2415 | * The correct header is ifinfomsg. It is consistent with rtnl_getlink. |
2416 | * However, before Linux v3.9 the code here assumed rtgenmsg and that's |
2417 | * what iproute2 < v3.9.0 used. |
2418 | * We can detect the old iproute2. Even including the IFLA_EXT_MASK |
2419 | * attribute, its netlink message is shorter than struct ifinfomsg. |
2420 | */ |
2421 | hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? |
2422 | sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); |
2423 | |
2424 | return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, policy: ifla_policy, |
2425 | extack); |
2426 | } |
2427 | |
2428 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
2429 | { |
2430 | struct netlink_ext_ack *extack = cb->extack; |
2431 | struct rtnl_link_ops *kind_ops = NULL; |
2432 | const struct nlmsghdr *nlh = cb->nlh; |
2433 | struct net *net = sock_net(sk: skb->sk); |
2434 | unsigned int flags = NLM_F_MULTI; |
2435 | struct nlattr *tb[IFLA_MAX+1]; |
2436 | struct { |
2437 | unsigned long ifindex; |
2438 | } *ctx = (void *)cb->ctx; |
2439 | struct net *tgt_net = net; |
2440 | u32 ext_filter_mask = 0; |
2441 | struct net_device *dev; |
2442 | int ops_srcu_index; |
2443 | int master_idx = 0; |
2444 | int netnsid = -1; |
2445 | int err, i; |
2446 | |
2447 | err = rtnl_valid_dump_ifinfo_req(nlh, strict_check: cb->strict_check, tb, extack); |
2448 | if (err < 0) { |
2449 | if (cb->strict_check) |
2450 | return err; |
2451 | |
2452 | goto walk_entries; |
2453 | } |
2454 | |
2455 | for (i = 0; i <= IFLA_MAX; ++i) { |
2456 | if (!tb[i]) |
2457 | continue; |
2458 | |
2459 | /* new attributes should only be added with strict checking */ |
2460 | switch (i) { |
2461 | case IFLA_TARGET_NETNSID: |
2462 | netnsid = nla_get_s32(nla: tb[i]); |
2463 | tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); |
2464 | if (IS_ERR(ptr: tgt_net)) { |
2465 | NL_SET_ERR_MSG(extack, "Invalid target network namespace id" ); |
2466 | err = PTR_ERR(ptr: tgt_net); |
2467 | netnsid = -1; |
2468 | goto out; |
2469 | } |
2470 | break; |
2471 | case IFLA_EXT_MASK: |
2472 | ext_filter_mask = nla_get_u32(nla: tb[i]); |
2473 | break; |
2474 | case IFLA_MASTER: |
2475 | master_idx = nla_get_u32(nla: tb[i]); |
2476 | break; |
2477 | case IFLA_LINKINFO: |
2478 | kind_ops = linkinfo_to_kind_ops(nla: tb[i], ops_srcu_index: &ops_srcu_index); |
2479 | break; |
2480 | default: |
2481 | if (cb->strict_check) { |
2482 | NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request" ); |
2483 | err = -EINVAL; |
2484 | goto out; |
2485 | } |
2486 | } |
2487 | } |
2488 | |
2489 | if (master_idx || kind_ops) |
2490 | flags |= NLM_F_DUMP_FILTERED; |
2491 | |
2492 | walk_entries: |
2493 | err = 0; |
2494 | for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { |
2495 | if (link_dump_filtered(dev, master_idx, kind_ops)) |
2496 | continue; |
2497 | err = rtnl_fill_ifinfo(skb, dev, src_net: net, RTM_NEWLINK, |
2498 | NETLINK_CB(cb->skb).portid, |
2499 | seq: nlh->nlmsg_seq, change: 0, flags, |
2500 | ext_filter_mask, event: 0, NULL, new_ifindex: 0, |
2501 | tgt_netnsid: netnsid, GFP_KERNEL); |
2502 | if (err < 0) |
2503 | break; |
2504 | } |
2505 | |
2506 | |
2507 | cb->seq = tgt_net->dev_base_seq; |
2508 | nl_dump_check_consistent(cb, nlh: nlmsg_hdr(skb)); |
2509 | |
2510 | out: |
2511 | |
2512 | if (kind_ops) |
2513 | rtnl_link_ops_put(ops: kind_ops, srcu_index: ops_srcu_index); |
2514 | if (netnsid >= 0) |
2515 | put_net(net: tgt_net); |
2516 | |
2517 | return err; |
2518 | } |
2519 | |
2520 | int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, |
2521 | struct netlink_ext_ack *exterr) |
2522 | { |
2523 | const struct ifinfomsg *ifmp; |
2524 | const struct nlattr *attrs; |
2525 | size_t len; |
2526 | |
2527 | ifmp = nla_data(nla: nla_peer); |
2528 | attrs = nla_data(nla: nla_peer) + sizeof(struct ifinfomsg); |
2529 | len = nla_len(nla: nla_peer) - sizeof(struct ifinfomsg); |
2530 | |
2531 | if (ifmp->ifi_index < 0) { |
2532 | NL_SET_ERR_MSG_ATTR(exterr, nla_peer, |
2533 | "ifindex can't be negative" ); |
2534 | return -EINVAL; |
2535 | } |
2536 | |
2537 | return nla_parse_deprecated(tb, IFLA_MAX, head: attrs, len, policy: ifla_policy, |
2538 | extack: exterr); |
2539 | } |
2540 | EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); |
2541 | |
2542 | static struct net *rtnl_link_get_net_ifla(struct nlattr *tb[]) |
2543 | { |
2544 | struct net *net = NULL; |
2545 | |
2546 | /* Examine the link attributes and figure out which |
2547 | * network namespace we are talking about. |
2548 | */ |
2549 | if (tb[IFLA_NET_NS_PID]) |
2550 | net = get_net_ns_by_pid(pid: nla_get_u32(nla: tb[IFLA_NET_NS_PID])); |
2551 | else if (tb[IFLA_NET_NS_FD]) |
2552 | net = get_net_ns_by_fd(fd: nla_get_u32(nla: tb[IFLA_NET_NS_FD])); |
2553 | |
2554 | return net; |
2555 | } |
2556 | |
2557 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) |
2558 | { |
2559 | struct net *net = rtnl_link_get_net_ifla(tb); |
2560 | |
2561 | if (!net) |
2562 | net = get_net(net: src_net); |
2563 | |
2564 | return net; |
2565 | } |
2566 | EXPORT_SYMBOL(rtnl_link_get_net); |
2567 | |
2568 | /* Figure out which network namespace we are talking about by |
2569 | * examining the link attributes in the following order: |
2570 | * |
2571 | * 1. IFLA_NET_NS_PID |
2572 | * 2. IFLA_NET_NS_FD |
2573 | * 3. IFLA_TARGET_NETNSID |
2574 | */ |
2575 | static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, |
2576 | struct nlattr *tb[]) |
2577 | { |
2578 | struct net *net; |
2579 | |
2580 | if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) |
2581 | return rtnl_link_get_net(src_net, tb); |
2582 | |
2583 | if (!tb[IFLA_TARGET_NETNSID]) |
2584 | return get_net(net: src_net); |
2585 | |
2586 | net = get_net_ns_by_id(net: src_net, id: nla_get_u32(nla: tb[IFLA_TARGET_NETNSID])); |
2587 | if (!net) |
2588 | return ERR_PTR(error: -EINVAL); |
2589 | |
2590 | return net; |
2591 | } |
2592 | |
2593 | static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, |
2594 | struct net *src_net, |
2595 | struct nlattr *tb[], int cap) |
2596 | { |
2597 | struct net *net; |
2598 | |
2599 | net = rtnl_link_get_net_by_nlattr(src_net, tb); |
2600 | if (IS_ERR(ptr: net)) |
2601 | return net; |
2602 | |
2603 | if (!netlink_ns_capable(skb, ns: net->user_ns, cap)) { |
2604 | put_net(net); |
2605 | return ERR_PTR(error: -EPERM); |
2606 | } |
2607 | |
2608 | return net; |
2609 | } |
2610 | |
2611 | /* Verify that rtnetlink requests do not pass additional properties |
2612 | * potentially referring to different network namespaces. |
2613 | */ |
2614 | static int rtnl_ensure_unique_netns(struct nlattr *tb[], |
2615 | struct netlink_ext_ack *extack, |
2616 | bool netns_id_only) |
2617 | { |
2618 | |
2619 | if (netns_id_only) { |
2620 | if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) |
2621 | return 0; |
2622 | |
2623 | NL_SET_ERR_MSG(extack, "specified netns attribute not supported" ); |
2624 | return -EOPNOTSUPP; |
2625 | } |
2626 | |
2627 | if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) |
2628 | goto invalid_attr; |
2629 | |
2630 | if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) |
2631 | goto invalid_attr; |
2632 | |
2633 | if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) |
2634 | goto invalid_attr; |
2635 | |
2636 | return 0; |
2637 | |
2638 | invalid_attr: |
2639 | NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified" ); |
2640 | return -EINVAL; |
2641 | } |
2642 | |
2643 | static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, |
2644 | int max_tx_rate) |
2645 | { |
2646 | const struct net_device_ops *ops = dev->netdev_ops; |
2647 | |
2648 | if (!ops->ndo_set_vf_rate) |
2649 | return -EOPNOTSUPP; |
2650 | if (max_tx_rate && max_tx_rate < min_tx_rate) |
2651 | return -EINVAL; |
2652 | |
2653 | return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); |
2654 | } |
2655 | |
2656 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], |
2657 | struct netlink_ext_ack *extack) |
2658 | { |
2659 | if (tb[IFLA_ADDRESS] && |
2660 | nla_len(nla: tb[IFLA_ADDRESS]) < dev->addr_len) |
2661 | return -EINVAL; |
2662 | |
2663 | if (tb[IFLA_BROADCAST] && |
2664 | nla_len(nla: tb[IFLA_BROADCAST]) < dev->addr_len) |
2665 | return -EINVAL; |
2666 | |
2667 | if (tb[IFLA_GSO_MAX_SIZE] && |
2668 | nla_get_u32(nla: tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { |
2669 | NL_SET_ERR_MSG(extack, "too big gso_max_size" ); |
2670 | return -EINVAL; |
2671 | } |
2672 | |
2673 | if (tb[IFLA_GSO_MAX_SEGS] && |
2674 | (nla_get_u32(nla: tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || |
2675 | nla_get_u32(nla: tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { |
2676 | NL_SET_ERR_MSG(extack, "too big gso_max_segs" ); |
2677 | return -EINVAL; |
2678 | } |
2679 | |
2680 | if (tb[IFLA_GRO_MAX_SIZE] && |
2681 | nla_get_u32(nla: tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { |
2682 | NL_SET_ERR_MSG(extack, "too big gro_max_size" ); |
2683 | return -EINVAL; |
2684 | } |
2685 | |
2686 | if (tb[IFLA_GSO_IPV4_MAX_SIZE] && |
2687 | nla_get_u32(nla: tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { |
2688 | NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size" ); |
2689 | return -EINVAL; |
2690 | } |
2691 | |
2692 | if (tb[IFLA_GRO_IPV4_MAX_SIZE] && |
2693 | nla_get_u32(nla: tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { |
2694 | NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size" ); |
2695 | return -EINVAL; |
2696 | } |
2697 | |
2698 | if (tb[IFLA_AF_SPEC]) { |
2699 | struct nlattr *af; |
2700 | int rem, err; |
2701 | |
2702 | nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { |
2703 | struct rtnl_af_ops *af_ops; |
2704 | int af_ops_srcu_index; |
2705 | |
2706 | af_ops = rtnl_af_lookup(family: nla_type(nla: af), srcu_index: &af_ops_srcu_index); |
2707 | if (!af_ops) |
2708 | return -EAFNOSUPPORT; |
2709 | |
2710 | if (!af_ops->set_link_af) |
2711 | err = -EOPNOTSUPP; |
2712 | else if (af_ops->validate_link_af) |
2713 | err = af_ops->validate_link_af(dev, af, extack); |
2714 | else |
2715 | err = 0; |
2716 | |
2717 | rtnl_af_put(ops: af_ops, srcu_index: af_ops_srcu_index); |
2718 | |
2719 | if (err < 0) |
2720 | return err; |
2721 | } |
2722 | } |
2723 | |
2724 | return 0; |
2725 | } |
2726 | |
2727 | static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, |
2728 | int guid_type) |
2729 | { |
2730 | const struct net_device_ops *ops = dev->netdev_ops; |
2731 | |
2732 | return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); |
2733 | } |
2734 | |
2735 | static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) |
2736 | { |
2737 | if (dev->type != ARPHRD_INFINIBAND) |
2738 | return -EOPNOTSUPP; |
2739 | |
2740 | return handle_infiniband_guid(dev, ivt, guid_type); |
2741 | } |
2742 | |
2743 | static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
2744 | { |
2745 | const struct net_device_ops *ops = dev->netdev_ops; |
2746 | int err = -EINVAL; |
2747 | |
2748 | if (tb[IFLA_VF_MAC]) { |
2749 | struct ifla_vf_mac *ivm = nla_data(nla: tb[IFLA_VF_MAC]); |
2750 | |
2751 | if (ivm->vf >= INT_MAX) |
2752 | return -EINVAL; |
2753 | err = -EOPNOTSUPP; |
2754 | if (ops->ndo_set_vf_mac) |
2755 | err = ops->ndo_set_vf_mac(dev, ivm->vf, |
2756 | ivm->mac); |
2757 | if (err < 0) |
2758 | return err; |
2759 | } |
2760 | |
2761 | if (tb[IFLA_VF_VLAN]) { |
2762 | struct ifla_vf_vlan *ivv = nla_data(nla: tb[IFLA_VF_VLAN]); |
2763 | |
2764 | if (ivv->vf >= INT_MAX) |
2765 | return -EINVAL; |
2766 | err = -EOPNOTSUPP; |
2767 | if (ops->ndo_set_vf_vlan) |
2768 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, |
2769 | ivv->qos, |
2770 | htons(ETH_P_8021Q)); |
2771 | if (err < 0) |
2772 | return err; |
2773 | } |
2774 | |
2775 | if (tb[IFLA_VF_VLAN_LIST]) { |
2776 | struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; |
2777 | struct nlattr *attr; |
2778 | int rem, len = 0; |
2779 | |
2780 | err = -EOPNOTSUPP; |
2781 | if (!ops->ndo_set_vf_vlan) |
2782 | return err; |
2783 | |
2784 | nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { |
2785 | if (nla_type(nla: attr) != IFLA_VF_VLAN_INFO || |
2786 | nla_len(nla: attr) < sizeof(struct ifla_vf_vlan_info)) { |
2787 | return -EINVAL; |
2788 | } |
2789 | if (len >= MAX_VLAN_LIST_LEN) |
2790 | return -EOPNOTSUPP; |
2791 | ivvl[len] = nla_data(nla: attr); |
2792 | |
2793 | len++; |
2794 | } |
2795 | if (len == 0) |
2796 | return -EINVAL; |
2797 | |
2798 | if (ivvl[0]->vf >= INT_MAX) |
2799 | return -EINVAL; |
2800 | err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, |
2801 | ivvl[0]->qos, ivvl[0]->vlan_proto); |
2802 | if (err < 0) |
2803 | return err; |
2804 | } |
2805 | |
2806 | if (tb[IFLA_VF_TX_RATE]) { |
2807 | struct ifla_vf_tx_rate *ivt = nla_data(nla: tb[IFLA_VF_TX_RATE]); |
2808 | struct ifla_vf_info ivf; |
2809 | |
2810 | if (ivt->vf >= INT_MAX) |
2811 | return -EINVAL; |
2812 | err = -EOPNOTSUPP; |
2813 | if (ops->ndo_get_vf_config) |
2814 | err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); |
2815 | if (err < 0) |
2816 | return err; |
2817 | |
2818 | err = rtnl_set_vf_rate(dev, vf: ivt->vf, |
2819 | min_tx_rate: ivf.min_tx_rate, max_tx_rate: ivt->rate); |
2820 | if (err < 0) |
2821 | return err; |
2822 | } |
2823 | |
2824 | if (tb[IFLA_VF_RATE]) { |
2825 | struct ifla_vf_rate *ivt = nla_data(nla: tb[IFLA_VF_RATE]); |
2826 | |
2827 | if (ivt->vf >= INT_MAX) |
2828 | return -EINVAL; |
2829 | |
2830 | err = rtnl_set_vf_rate(dev, vf: ivt->vf, |
2831 | min_tx_rate: ivt->min_tx_rate, max_tx_rate: ivt->max_tx_rate); |
2832 | if (err < 0) |
2833 | return err; |
2834 | } |
2835 | |
2836 | if (tb[IFLA_VF_SPOOFCHK]) { |
2837 | struct ifla_vf_spoofchk *ivs = nla_data(nla: tb[IFLA_VF_SPOOFCHK]); |
2838 | |
2839 | if (ivs->vf >= INT_MAX) |
2840 | return -EINVAL; |
2841 | err = -EOPNOTSUPP; |
2842 | if (ops->ndo_set_vf_spoofchk) |
2843 | err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, |
2844 | ivs->setting); |
2845 | if (err < 0) |
2846 | return err; |
2847 | } |
2848 | |
2849 | if (tb[IFLA_VF_LINK_STATE]) { |
2850 | struct ifla_vf_link_state *ivl = nla_data(nla: tb[IFLA_VF_LINK_STATE]); |
2851 | |
2852 | if (ivl->vf >= INT_MAX) |
2853 | return -EINVAL; |
2854 | err = -EOPNOTSUPP; |
2855 | if (ops->ndo_set_vf_link_state) |
2856 | err = ops->ndo_set_vf_link_state(dev, ivl->vf, |
2857 | ivl->link_state); |
2858 | if (err < 0) |
2859 | return err; |
2860 | } |
2861 | |
2862 | if (tb[IFLA_VF_RSS_QUERY_EN]) { |
2863 | struct ifla_vf_rss_query_en *; |
2864 | |
2865 | err = -EOPNOTSUPP; |
2866 | ivrssq_en = nla_data(nla: tb[IFLA_VF_RSS_QUERY_EN]); |
2867 | if (ivrssq_en->vf >= INT_MAX) |
2868 | return -EINVAL; |
2869 | if (ops->ndo_set_vf_rss_query_en) |
2870 | err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, |
2871 | ivrssq_en->setting); |
2872 | if (err < 0) |
2873 | return err; |
2874 | } |
2875 | |
2876 | if (tb[IFLA_VF_TRUST]) { |
2877 | struct ifla_vf_trust *ivt = nla_data(nla: tb[IFLA_VF_TRUST]); |
2878 | |
2879 | if (ivt->vf >= INT_MAX) |
2880 | return -EINVAL; |
2881 | err = -EOPNOTSUPP; |
2882 | if (ops->ndo_set_vf_trust) |
2883 | err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); |
2884 | if (err < 0) |
2885 | return err; |
2886 | } |
2887 | |
2888 | if (tb[IFLA_VF_IB_NODE_GUID]) { |
2889 | struct ifla_vf_guid *ivt = nla_data(nla: tb[IFLA_VF_IB_NODE_GUID]); |
2890 | |
2891 | if (ivt->vf >= INT_MAX) |
2892 | return -EINVAL; |
2893 | if (!ops->ndo_set_vf_guid) |
2894 | return -EOPNOTSUPP; |
2895 | return handle_vf_guid(dev, ivt, guid_type: IFLA_VF_IB_NODE_GUID); |
2896 | } |
2897 | |
2898 | if (tb[IFLA_VF_IB_PORT_GUID]) { |
2899 | struct ifla_vf_guid *ivt = nla_data(nla: tb[IFLA_VF_IB_PORT_GUID]); |
2900 | |
2901 | if (ivt->vf >= INT_MAX) |
2902 | return -EINVAL; |
2903 | if (!ops->ndo_set_vf_guid) |
2904 | return -EOPNOTSUPP; |
2905 | |
2906 | return handle_vf_guid(dev, ivt, guid_type: IFLA_VF_IB_PORT_GUID); |
2907 | } |
2908 | |
2909 | return err; |
2910 | } |
2911 | |
2912 | static int do_set_master(struct net_device *dev, int ifindex, |
2913 | struct netlink_ext_ack *extack) |
2914 | { |
2915 | struct net_device *upper_dev = netdev_master_upper_dev_get(dev); |
2916 | const struct net_device_ops *ops; |
2917 | int err; |
2918 | |
2919 | /* Release the lower lock, the upper is responsible for locking |
2920 | * the lower if needed. None of the existing upper devices |
2921 | * use netdev instance lock, so don't grab it. |
2922 | */ |
2923 | |
2924 | if (upper_dev) { |
2925 | if (upper_dev->ifindex == ifindex) |
2926 | return 0; |
2927 | ops = upper_dev->netdev_ops; |
2928 | if (ops->ndo_del_slave) { |
2929 | netdev_unlock_ops(dev); |
2930 | err = ops->ndo_del_slave(upper_dev, dev); |
2931 | netdev_lock_ops(dev); |
2932 | if (err) |
2933 | return err; |
2934 | } else { |
2935 | return -EOPNOTSUPP; |
2936 | } |
2937 | } |
2938 | |
2939 | if (ifindex) { |
2940 | upper_dev = __dev_get_by_index(net: dev_net(dev), ifindex); |
2941 | if (!upper_dev) |
2942 | return -EINVAL; |
2943 | ops = upper_dev->netdev_ops; |
2944 | if (ops->ndo_add_slave) { |
2945 | netdev_unlock_ops(dev); |
2946 | err = ops->ndo_add_slave(upper_dev, dev, extack); |
2947 | netdev_lock_ops(dev); |
2948 | if (err) |
2949 | return err; |
2950 | } else { |
2951 | return -EOPNOTSUPP; |
2952 | } |
2953 | } |
2954 | return 0; |
2955 | } |
2956 | |
2957 | static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { |
2958 | [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, |
2959 | [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, |
2960 | }; |
2961 | |
2962 | static int do_set_proto_down(struct net_device *dev, |
2963 | struct nlattr *nl_proto_down, |
2964 | struct nlattr *nl_proto_down_reason, |
2965 | struct netlink_ext_ack *extack) |
2966 | { |
2967 | struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; |
2968 | unsigned long mask = 0; |
2969 | u32 value; |
2970 | bool proto_down; |
2971 | int err; |
2972 | |
2973 | if (!dev->change_proto_down) { |
2974 | NL_SET_ERR_MSG(extack, "Protodown not supported by device" ); |
2975 | return -EOPNOTSUPP; |
2976 | } |
2977 | |
2978 | if (nl_proto_down_reason) { |
2979 | err = nla_parse_nested_deprecated(tb: pdreason, |
2980 | maxtype: IFLA_PROTO_DOWN_REASON_MAX, |
2981 | nla: nl_proto_down_reason, |
2982 | policy: ifla_proto_down_reason_policy, |
2983 | NULL); |
2984 | if (err < 0) |
2985 | return err; |
2986 | |
2987 | if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { |
2988 | NL_SET_ERR_MSG(extack, "Invalid protodown reason value" ); |
2989 | return -EINVAL; |
2990 | } |
2991 | |
2992 | value = nla_get_u32(nla: pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); |
2993 | |
2994 | if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) |
2995 | mask = nla_get_u32(nla: pdreason[IFLA_PROTO_DOWN_REASON_MASK]); |
2996 | |
2997 | netdev_change_proto_down_reason_locked(dev, mask, value); |
2998 | } |
2999 | |
3000 | if (nl_proto_down) { |
3001 | proto_down = nla_get_u8(nla: nl_proto_down); |
3002 | |
3003 | /* Don't turn off protodown if there are active reasons */ |
3004 | if (!proto_down && dev->proto_down_reason) { |
3005 | NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons" ); |
3006 | return -EBUSY; |
3007 | } |
3008 | err = netif_change_proto_down(dev, proto_down); |
3009 | if (err) |
3010 | return err; |
3011 | } |
3012 | |
3013 | return 0; |
3014 | } |
3015 | |
3016 | #define DO_SETLINK_MODIFIED 0x01 |
3017 | /* notify flag means notify + modified. */ |
3018 | #define DO_SETLINK_NOTIFY 0x03 |
3019 | static int do_setlink(const struct sk_buff *skb, struct net_device *dev, |
3020 | struct net *tgt_net, struct ifinfomsg *ifm, |
3021 | struct netlink_ext_ack *extack, |
3022 | struct nlattr **tb, int status) |
3023 | { |
3024 | const struct net_device_ops *ops = dev->netdev_ops; |
3025 | char ifname[IFNAMSIZ]; |
3026 | int err; |
3027 | |
3028 | err = validate_linkmsg(dev, tb, extack); |
3029 | if (err < 0) |
3030 | return err; |
3031 | |
3032 | if (tb[IFLA_IFNAME]) |
3033 | nla_strscpy(dst: ifname, nla: tb[IFLA_IFNAME], IFNAMSIZ); |
3034 | else |
3035 | ifname[0] = '\0'; |
3036 | |
3037 | if (!net_eq(net1: tgt_net, net2: dev_net(dev))) { |
3038 | const char *pat = ifname[0] ? ifname : NULL; |
3039 | int new_ifindex; |
3040 | |
3041 | new_ifindex = nla_get_s32_default(nla: tb[IFLA_NEW_IFINDEX], defvalue: 0); |
3042 | |
3043 | err = __dev_change_net_namespace(dev, net: tgt_net, pat, |
3044 | new_ifindex, extack); |
3045 | if (err) |
3046 | return err; |
3047 | |
3048 | status |= DO_SETLINK_MODIFIED; |
3049 | } |
3050 | |
3051 | netdev_lock_ops(dev); |
3052 | |
3053 | if (tb[IFLA_MAP]) { |
3054 | struct rtnl_link_ifmap *u_map; |
3055 | struct ifmap k_map; |
3056 | |
3057 | if (!ops->ndo_set_config) { |
3058 | err = -EOPNOTSUPP; |
3059 | goto errout; |
3060 | } |
3061 | |
3062 | if (!netif_device_present(dev)) { |
3063 | err = -ENODEV; |
3064 | goto errout; |
3065 | } |
3066 | |
3067 | u_map = nla_data(nla: tb[IFLA_MAP]); |
3068 | k_map.mem_start = (unsigned long) u_map->mem_start; |
3069 | k_map.mem_end = (unsigned long) u_map->mem_end; |
3070 | k_map.base_addr = (unsigned short) u_map->base_addr; |
3071 | k_map.irq = (unsigned char) u_map->irq; |
3072 | k_map.dma = (unsigned char) u_map->dma; |
3073 | k_map.port = (unsigned char) u_map->port; |
3074 | |
3075 | err = ops->ndo_set_config(dev, &k_map); |
3076 | if (err < 0) |
3077 | goto errout; |
3078 | |
3079 | status |= DO_SETLINK_NOTIFY; |
3080 | } |
3081 | |
3082 | if (tb[IFLA_ADDRESS]) { |
3083 | struct sockaddr_storage ss = { }; |
3084 | |
3085 | netdev_unlock_ops(dev); |
3086 | |
3087 | /* dev_addr_sem is an outer lock, enforce proper ordering */ |
3088 | down_write(sem: &dev_addr_sem); |
3089 | netdev_lock_ops(dev); |
3090 | |
3091 | ss.ss_family = dev->type; |
3092 | memcpy(ss.__data, nla_data(tb[IFLA_ADDRESS]), dev->addr_len); |
3093 | err = netif_set_mac_address(dev, ss: &ss, extack); |
3094 | if (err) { |
3095 | up_write(sem: &dev_addr_sem); |
3096 | goto errout; |
3097 | } |
3098 | status |= DO_SETLINK_MODIFIED; |
3099 | |
3100 | up_write(sem: &dev_addr_sem); |
3101 | } |
3102 | |
3103 | if (tb[IFLA_MTU]) { |
3104 | err = netif_set_mtu_ext(dev, new_mtu: nla_get_u32(nla: tb[IFLA_MTU]), extack); |
3105 | if (err < 0) |
3106 | goto errout; |
3107 | status |= DO_SETLINK_MODIFIED; |
3108 | } |
3109 | |
3110 | if (tb[IFLA_GROUP]) { |
3111 | netif_set_group(dev, new_group: nla_get_u32(nla: tb[IFLA_GROUP])); |
3112 | status |= DO_SETLINK_NOTIFY; |
3113 | } |
3114 | |
3115 | /* |
3116 | * Interface selected by interface index but interface |
3117 | * name provided implies that a name change has been |
3118 | * requested. |
3119 | */ |
3120 | if (ifm->ifi_index > 0 && ifname[0]) { |
3121 | err = netif_change_name(dev, newname: ifname); |
3122 | if (err < 0) |
3123 | goto errout; |
3124 | status |= DO_SETLINK_MODIFIED; |
3125 | } |
3126 | |
3127 | if (tb[IFLA_IFALIAS]) { |
3128 | err = netif_set_alias(dev, alias: nla_data(nla: tb[IFLA_IFALIAS]), |
3129 | len: nla_len(nla: tb[IFLA_IFALIAS])); |
3130 | if (err < 0) |
3131 | goto errout; |
3132 | status |= DO_SETLINK_NOTIFY; |
3133 | } |
3134 | |
3135 | if (tb[IFLA_BROADCAST]) { |
3136 | nla_memcpy(dest: dev->broadcast, src: tb[IFLA_BROADCAST], count: dev->addr_len); |
3137 | call_netdevice_notifiers(val: NETDEV_CHANGEADDR, dev); |
3138 | } |
3139 | |
3140 | if (ifm->ifi_flags || ifm->ifi_change) { |
3141 | err = netif_change_flags(dev, flags: rtnl_dev_combine_flags(dev, ifm), |
3142 | extack); |
3143 | if (err < 0) |
3144 | goto errout; |
3145 | } |
3146 | |
3147 | if (tb[IFLA_MASTER]) { |
3148 | err = do_set_master(dev, ifindex: nla_get_u32(nla: tb[IFLA_MASTER]), extack); |
3149 | if (err) |
3150 | goto errout; |
3151 | status |= DO_SETLINK_MODIFIED; |
3152 | } |
3153 | |
3154 | if (tb[IFLA_CARRIER]) { |
3155 | err = netif_change_carrier(dev, new_carrier: nla_get_u8(nla: tb[IFLA_CARRIER])); |
3156 | if (err) |
3157 | goto errout; |
3158 | status |= DO_SETLINK_MODIFIED; |
3159 | } |
3160 | |
3161 | if (tb[IFLA_TXQLEN]) { |
3162 | unsigned int value = nla_get_u32(nla: tb[IFLA_TXQLEN]); |
3163 | |
3164 | err = netif_change_tx_queue_len(dev, new_len: value); |
3165 | if (err) |
3166 | goto errout; |
3167 | status |= DO_SETLINK_MODIFIED; |
3168 | } |
3169 | |
3170 | if (tb[IFLA_GSO_MAX_SIZE]) { |
3171 | u32 max_size = nla_get_u32(nla: tb[IFLA_GSO_MAX_SIZE]); |
3172 | |
3173 | if (dev->gso_max_size ^ max_size) { |
3174 | netif_set_gso_max_size(dev, size: max_size); |
3175 | status |= DO_SETLINK_MODIFIED; |
3176 | } |
3177 | } |
3178 | |
3179 | if (tb[IFLA_GSO_MAX_SEGS]) { |
3180 | u32 max_segs = nla_get_u32(nla: tb[IFLA_GSO_MAX_SEGS]); |
3181 | |
3182 | if (dev->gso_max_segs ^ max_segs) { |
3183 | netif_set_gso_max_segs(dev, segs: max_segs); |
3184 | status |= DO_SETLINK_MODIFIED; |
3185 | } |
3186 | } |
3187 | |
3188 | if (tb[IFLA_GRO_MAX_SIZE]) { |
3189 | u32 gro_max_size = nla_get_u32(nla: tb[IFLA_GRO_MAX_SIZE]); |
3190 | |
3191 | if (dev->gro_max_size ^ gro_max_size) { |
3192 | netif_set_gro_max_size(dev, size: gro_max_size); |
3193 | status |= DO_SETLINK_MODIFIED; |
3194 | } |
3195 | } |
3196 | |
3197 | if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { |
3198 | u32 max_size = nla_get_u32(nla: tb[IFLA_GSO_IPV4_MAX_SIZE]); |
3199 | |
3200 | if (dev->gso_ipv4_max_size ^ max_size) { |
3201 | netif_set_gso_ipv4_max_size(dev, size: max_size); |
3202 | status |= DO_SETLINK_MODIFIED; |
3203 | } |
3204 | } |
3205 | |
3206 | if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { |
3207 | u32 gro_max_size = nla_get_u32(nla: tb[IFLA_GRO_IPV4_MAX_SIZE]); |
3208 | |
3209 | if (dev->gro_ipv4_max_size ^ gro_max_size) { |
3210 | netif_set_gro_ipv4_max_size(dev, size: gro_max_size); |
3211 | status |= DO_SETLINK_MODIFIED; |
3212 | } |
3213 | } |
3214 | |
3215 | if (tb[IFLA_OPERSTATE]) |
3216 | set_operstate(dev, transition: nla_get_u8(nla: tb[IFLA_OPERSTATE])); |
3217 | |
3218 | if (tb[IFLA_LINKMODE]) { |
3219 | unsigned char value = nla_get_u8(nla: tb[IFLA_LINKMODE]); |
3220 | |
3221 | if (dev->link_mode ^ value) |
3222 | status |= DO_SETLINK_NOTIFY; |
3223 | WRITE_ONCE(dev->link_mode, value); |
3224 | } |
3225 | |
3226 | if (tb[IFLA_VFINFO_LIST]) { |
3227 | struct nlattr *vfinfo[IFLA_VF_MAX + 1]; |
3228 | struct nlattr *attr; |
3229 | int rem; |
3230 | |
3231 | nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { |
3232 | if (nla_type(nla: attr) != IFLA_VF_INFO || |
3233 | nla_len(nla: attr) < NLA_HDRLEN) { |
3234 | err = -EINVAL; |
3235 | goto errout; |
3236 | } |
3237 | err = nla_parse_nested_deprecated(tb: vfinfo, IFLA_VF_MAX, |
3238 | nla: attr, |
3239 | policy: ifla_vf_policy, |
3240 | NULL); |
3241 | if (err < 0) |
3242 | goto errout; |
3243 | err = do_setvfinfo(dev, tb: vfinfo); |
3244 | if (err < 0) |
3245 | goto errout; |
3246 | status |= DO_SETLINK_NOTIFY; |
3247 | } |
3248 | } |
3249 | err = 0; |
3250 | |
3251 | if (tb[IFLA_VF_PORTS]) { |
3252 | struct nlattr *port[IFLA_PORT_MAX+1]; |
3253 | struct nlattr *attr; |
3254 | int vf; |
3255 | int rem; |
3256 | |
3257 | err = -EOPNOTSUPP; |
3258 | if (!ops->ndo_set_vf_port) |
3259 | goto errout; |
3260 | |
3261 | nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { |
3262 | if (nla_type(nla: attr) != IFLA_VF_PORT || |
3263 | nla_len(nla: attr) < NLA_HDRLEN) { |
3264 | err = -EINVAL; |
3265 | goto errout; |
3266 | } |
3267 | err = nla_parse_nested_deprecated(tb: port, IFLA_PORT_MAX, |
3268 | nla: attr, |
3269 | policy: ifla_port_policy, |
3270 | NULL); |
3271 | if (err < 0) |
3272 | goto errout; |
3273 | if (!port[IFLA_PORT_VF]) { |
3274 | err = -EOPNOTSUPP; |
3275 | goto errout; |
3276 | } |
3277 | vf = nla_get_u32(nla: port[IFLA_PORT_VF]); |
3278 | err = ops->ndo_set_vf_port(dev, vf, port); |
3279 | if (err < 0) |
3280 | goto errout; |
3281 | status |= DO_SETLINK_NOTIFY; |
3282 | } |
3283 | } |
3284 | err = 0; |
3285 | |
3286 | if (tb[IFLA_PORT_SELF]) { |
3287 | struct nlattr *port[IFLA_PORT_MAX+1]; |
3288 | |
3289 | err = nla_parse_nested_deprecated(tb: port, IFLA_PORT_MAX, |
3290 | nla: tb[IFLA_PORT_SELF], |
3291 | policy: ifla_port_policy, NULL); |
3292 | if (err < 0) |
3293 | goto errout; |
3294 | |
3295 | err = -EOPNOTSUPP; |
3296 | if (ops->ndo_set_vf_port) |
3297 | err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); |
3298 | if (err < 0) |
3299 | goto errout; |
3300 | status |= DO_SETLINK_NOTIFY; |
3301 | } |
3302 | |
3303 | if (tb[IFLA_AF_SPEC]) { |
3304 | struct nlattr *af; |
3305 | int rem; |
3306 | |
3307 | nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { |
3308 | struct rtnl_af_ops *af_ops; |
3309 | int af_ops_srcu_index; |
3310 | |
3311 | af_ops = rtnl_af_lookup(family: nla_type(nla: af), srcu_index: &af_ops_srcu_index); |
3312 | if (!af_ops) { |
3313 | err = -EAFNOSUPPORT; |
3314 | goto errout; |
3315 | } |
3316 | |
3317 | err = af_ops->set_link_af(dev, af, extack); |
3318 | rtnl_af_put(ops: af_ops, srcu_index: af_ops_srcu_index); |
3319 | |
3320 | if (err < 0) |
3321 | goto errout; |
3322 | |
3323 | status |= DO_SETLINK_NOTIFY; |
3324 | } |
3325 | } |
3326 | err = 0; |
3327 | |
3328 | if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { |
3329 | err = do_set_proto_down(dev, nl_proto_down: tb[IFLA_PROTO_DOWN], |
3330 | nl_proto_down_reason: tb[IFLA_PROTO_DOWN_REASON], extack); |
3331 | if (err) |
3332 | goto errout; |
3333 | status |= DO_SETLINK_NOTIFY; |
3334 | } |
3335 | |
3336 | if (tb[IFLA_XDP]) { |
3337 | struct nlattr *xdp[IFLA_XDP_MAX + 1]; |
3338 | u32 xdp_flags = 0; |
3339 | |
3340 | err = nla_parse_nested_deprecated(tb: xdp, IFLA_XDP_MAX, |
3341 | nla: tb[IFLA_XDP], |
3342 | policy: ifla_xdp_policy, NULL); |
3343 | if (err < 0) |
3344 | goto errout; |
3345 | |
3346 | if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { |
3347 | err = -EINVAL; |
3348 | goto errout; |
3349 | } |
3350 | |
3351 | if (xdp[IFLA_XDP_FLAGS]) { |
3352 | xdp_flags = nla_get_u32(nla: xdp[IFLA_XDP_FLAGS]); |
3353 | if (xdp_flags & ~XDP_FLAGS_MASK) { |
3354 | err = -EINVAL; |
3355 | goto errout; |
3356 | } |
3357 | if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { |
3358 | err = -EINVAL; |
3359 | goto errout; |
3360 | } |
3361 | } |
3362 | |
3363 | if (xdp[IFLA_XDP_FD]) { |
3364 | int expected_fd = -1; |
3365 | |
3366 | if (xdp_flags & XDP_FLAGS_REPLACE) { |
3367 | if (!xdp[IFLA_XDP_EXPECTED_FD]) { |
3368 | err = -EINVAL; |
3369 | goto errout; |
3370 | } |
3371 | expected_fd = |
3372 | nla_get_s32(nla: xdp[IFLA_XDP_EXPECTED_FD]); |
3373 | } |
3374 | |
3375 | err = dev_change_xdp_fd(dev, extack, |
3376 | fd: nla_get_s32(nla: xdp[IFLA_XDP_FD]), |
3377 | expected_fd, |
3378 | flags: xdp_flags); |
3379 | if (err) |
3380 | goto errout; |
3381 | status |= DO_SETLINK_NOTIFY; |
3382 | } |
3383 | } |
3384 | |
3385 | errout: |
3386 | if (status & DO_SETLINK_MODIFIED) { |
3387 | if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) |
3388 | netif_state_change(dev); |
3389 | |
3390 | if (err < 0) |
3391 | net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n" , |
3392 | dev->name); |
3393 | } |
3394 | |
3395 | netdev_unlock_ops(dev); |
3396 | |
3397 | return err; |
3398 | } |
3399 | |
3400 | static struct net_device *rtnl_dev_get(struct net *net, |
3401 | struct nlattr *tb[]) |
3402 | { |
3403 | char ifname[ALTIFNAMSIZ]; |
3404 | |
3405 | if (tb[IFLA_IFNAME]) |
3406 | nla_strscpy(dst: ifname, nla: tb[IFLA_IFNAME], IFNAMSIZ); |
3407 | else if (tb[IFLA_ALT_IFNAME]) |
3408 | nla_strscpy(dst: ifname, nla: tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); |
3409 | else |
3410 | return NULL; |
3411 | |
3412 | return __dev_get_by_name(net, name: ifname); |
3413 | } |
3414 | |
3415 | static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, |
3416 | struct netlink_ext_ack *extack) |
3417 | { |
3418 | struct ifinfomsg *ifm = nlmsg_data(nlh); |
3419 | struct net *net = sock_net(sk: skb->sk); |
3420 | struct nlattr *tb[IFLA_MAX+1]; |
3421 | struct net_device *dev = NULL; |
3422 | struct rtnl_nets rtnl_nets; |
3423 | struct net *tgt_net; |
3424 | int err; |
3425 | |
3426 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*ifm), tb, IFLA_MAX, |
3427 | policy: ifla_policy, extack); |
3428 | if (err < 0) |
3429 | goto errout; |
3430 | |
3431 | err = rtnl_ensure_unique_netns(tb, extack, netns_id_only: false); |
3432 | if (err < 0) |
3433 | goto errout; |
3434 | |
3435 | tgt_net = rtnl_link_get_net_capable(skb, src_net: net, tb, CAP_NET_ADMIN); |
3436 | if (IS_ERR(ptr: tgt_net)) { |
3437 | err = PTR_ERR(ptr: tgt_net); |
3438 | goto errout; |
3439 | } |
3440 | |
3441 | rtnl_nets_init(rtnl_nets: &rtnl_nets); |
3442 | rtnl_nets_add(rtnl_nets: &rtnl_nets, net: get_net(net)); |
3443 | rtnl_nets_add(rtnl_nets: &rtnl_nets, net: tgt_net); |
3444 | |
3445 | rtnl_nets_lock(rtnl_nets: &rtnl_nets); |
3446 | |
3447 | if (ifm->ifi_index > 0) |
3448 | dev = __dev_get_by_index(net, ifindex: ifm->ifi_index); |
3449 | else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) |
3450 | dev = rtnl_dev_get(net, tb); |
3451 | else |
3452 | err = -EINVAL; |
3453 | |
3454 | if (dev) |
3455 | err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, status: 0); |
3456 | else if (!err) |
3457 | err = -ENODEV; |
3458 | |
3459 | rtnl_nets_unlock(rtnl_nets: &rtnl_nets); |
3460 | rtnl_nets_destroy(rtnl_nets: &rtnl_nets); |
3461 | errout: |
3462 | return err; |
3463 | } |
3464 | |
3465 | static int rtnl_group_dellink(const struct net *net, int group) |
3466 | { |
3467 | struct net_device *dev, *aux; |
3468 | LIST_HEAD(list_kill); |
3469 | bool found = false; |
3470 | |
3471 | if (!group) |
3472 | return -EPERM; |
3473 | |
3474 | for_each_netdev(net, dev) { |
3475 | if (dev->group == group) { |
3476 | const struct rtnl_link_ops *ops; |
3477 | |
3478 | found = true; |
3479 | ops = dev->rtnl_link_ops; |
3480 | if (!ops || !ops->dellink) |
3481 | return -EOPNOTSUPP; |
3482 | } |
3483 | } |
3484 | |
3485 | if (!found) |
3486 | return -ENODEV; |
3487 | |
3488 | for_each_netdev_safe(net, dev, aux) { |
3489 | if (dev->group == group) { |
3490 | const struct rtnl_link_ops *ops; |
3491 | |
3492 | ops = dev->rtnl_link_ops; |
3493 | ops->dellink(dev, &list_kill); |
3494 | } |
3495 | } |
3496 | unregister_netdevice_many(head: &list_kill); |
3497 | |
3498 | return 0; |
3499 | } |
3500 | |
3501 | int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) |
3502 | { |
3503 | const struct rtnl_link_ops *ops; |
3504 | LIST_HEAD(list_kill); |
3505 | |
3506 | ops = dev->rtnl_link_ops; |
3507 | if (!ops || !ops->dellink) |
3508 | return -EOPNOTSUPP; |
3509 | |
3510 | ops->dellink(dev, &list_kill); |
3511 | unregister_netdevice_many_notify(head: &list_kill, portid, nlh); |
3512 | |
3513 | return 0; |
3514 | } |
3515 | EXPORT_SYMBOL_GPL(rtnl_delete_link); |
3516 | |
3517 | static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, |
3518 | struct netlink_ext_ack *extack) |
3519 | { |
3520 | struct ifinfomsg *ifm = nlmsg_data(nlh); |
3521 | struct net *net = sock_net(sk: skb->sk); |
3522 | u32 portid = NETLINK_CB(skb).portid; |
3523 | struct nlattr *tb[IFLA_MAX+1]; |
3524 | struct net_device *dev = NULL; |
3525 | struct net *tgt_net = net; |
3526 | int netnsid = -1; |
3527 | int err; |
3528 | |
3529 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*ifm), tb, IFLA_MAX, |
3530 | policy: ifla_policy, extack); |
3531 | if (err < 0) |
3532 | return err; |
3533 | |
3534 | err = rtnl_ensure_unique_netns(tb, extack, netns_id_only: true); |
3535 | if (err < 0) |
3536 | return err; |
3537 | |
3538 | if (tb[IFLA_TARGET_NETNSID]) { |
3539 | netnsid = nla_get_s32(nla: tb[IFLA_TARGET_NETNSID]); |
3540 | tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); |
3541 | if (IS_ERR(ptr: tgt_net)) |
3542 | return PTR_ERR(ptr: tgt_net); |
3543 | } |
3544 | |
3545 | rtnl_net_lock(tgt_net); |
3546 | |
3547 | if (ifm->ifi_index > 0) |
3548 | dev = __dev_get_by_index(net: tgt_net, ifindex: ifm->ifi_index); |
3549 | else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) |
3550 | dev = rtnl_dev_get(net: tgt_net, tb); |
3551 | |
3552 | if (dev) |
3553 | err = rtnl_delete_link(dev, portid, nlh); |
3554 | else if (ifm->ifi_index > 0 || tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) |
3555 | err = -ENODEV; |
3556 | else if (tb[IFLA_GROUP]) |
3557 | err = rtnl_group_dellink(net: tgt_net, group: nla_get_u32(nla: tb[IFLA_GROUP])); |
3558 | else |
3559 | err = -EINVAL; |
3560 | |
3561 | rtnl_net_unlock(tgt_net); |
3562 | |
3563 | if (netnsid >= 0) |
3564 | put_net(net: tgt_net); |
3565 | |
3566 | return err; |
3567 | } |
3568 | |
3569 | int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, |
3570 | u32 portid, const struct nlmsghdr *nlh) |
3571 | { |
3572 | unsigned int old_flags, changed; |
3573 | int err; |
3574 | |
3575 | old_flags = dev->flags; |
3576 | if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { |
3577 | err = __dev_change_flags(dev, flags: rtnl_dev_combine_flags(dev, ifm), |
3578 | NULL); |
3579 | if (err < 0) |
3580 | return err; |
3581 | } |
3582 | |
3583 | changed = old_flags ^ dev->flags; |
3584 | if (dev->rtnl_link_initializing) { |
3585 | dev->rtnl_link_initializing = false; |
3586 | changed = ~0U; |
3587 | } |
3588 | |
3589 | __dev_notify_flags(dev, old_flags, gchanges: changed, portid, nlh); |
3590 | return 0; |
3591 | } |
3592 | EXPORT_SYMBOL(rtnl_configure_link); |
3593 | |
3594 | struct net_device *rtnl_create_link(struct net *net, const char *ifname, |
3595 | unsigned char name_assign_type, |
3596 | const struct rtnl_link_ops *ops, |
3597 | struct nlattr *tb[], |
3598 | struct netlink_ext_ack *extack) |
3599 | { |
3600 | struct net_device *dev; |
3601 | unsigned int num_tx_queues = 1; |
3602 | unsigned int num_rx_queues = 1; |
3603 | int err; |
3604 | |
3605 | if (tb[IFLA_NUM_TX_QUEUES]) |
3606 | num_tx_queues = nla_get_u32(nla: tb[IFLA_NUM_TX_QUEUES]); |
3607 | else if (ops->get_num_tx_queues) |
3608 | num_tx_queues = ops->get_num_tx_queues(); |
3609 | |
3610 | if (tb[IFLA_NUM_RX_QUEUES]) |
3611 | num_rx_queues = nla_get_u32(nla: tb[IFLA_NUM_RX_QUEUES]); |
3612 | else if (ops->get_num_rx_queues) |
3613 | num_rx_queues = ops->get_num_rx_queues(); |
3614 | |
3615 | if (num_tx_queues < 1 || num_tx_queues > 4096) { |
3616 | NL_SET_ERR_MSG(extack, "Invalid number of transmit queues" ); |
3617 | return ERR_PTR(error: -EINVAL); |
3618 | } |
3619 | |
3620 | if (num_rx_queues < 1 || num_rx_queues > 4096) { |
3621 | NL_SET_ERR_MSG(extack, "Invalid number of receive queues" ); |
3622 | return ERR_PTR(error: -EINVAL); |
3623 | } |
3624 | |
3625 | if (ops->alloc) { |
3626 | dev = ops->alloc(tb, ifname, name_assign_type, |
3627 | num_tx_queues, num_rx_queues); |
3628 | if (IS_ERR(ptr: dev)) |
3629 | return dev; |
3630 | } else { |
3631 | dev = alloc_netdev_mqs(sizeof_priv: ops->priv_size, name: ifname, |
3632 | name_assign_type, setup: ops->setup, |
3633 | txqs: num_tx_queues, rxqs: num_rx_queues); |
3634 | } |
3635 | |
3636 | if (!dev) |
3637 | return ERR_PTR(error: -ENOMEM); |
3638 | |
3639 | err = validate_linkmsg(dev, tb, extack); |
3640 | if (err < 0) { |
3641 | free_netdev(dev); |
3642 | return ERR_PTR(error: err); |
3643 | } |
3644 | |
3645 | dev_net_set(dev, net); |
3646 | dev->rtnl_link_ops = ops; |
3647 | dev->rtnl_link_initializing = true; |
3648 | |
3649 | if (tb[IFLA_MTU]) { |
3650 | u32 mtu = nla_get_u32(nla: tb[IFLA_MTU]); |
3651 | |
3652 | err = dev_validate_mtu(dev, mtu, extack); |
3653 | if (err) { |
3654 | free_netdev(dev); |
3655 | return ERR_PTR(error: err); |
3656 | } |
3657 | dev->mtu = mtu; |
3658 | } |
3659 | if (tb[IFLA_ADDRESS]) { |
3660 | __dev_addr_set(dev, addr: nla_data(nla: tb[IFLA_ADDRESS]), |
3661 | len: nla_len(nla: tb[IFLA_ADDRESS])); |
3662 | dev->addr_assign_type = NET_ADDR_SET; |
3663 | } |
3664 | if (tb[IFLA_BROADCAST]) |
3665 | memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), |
3666 | nla_len(tb[IFLA_BROADCAST])); |
3667 | if (tb[IFLA_TXQLEN]) |
3668 | dev->tx_queue_len = nla_get_u32(nla: tb[IFLA_TXQLEN]); |
3669 | if (tb[IFLA_OPERSTATE]) |
3670 | set_operstate(dev, transition: nla_get_u8(nla: tb[IFLA_OPERSTATE])); |
3671 | if (tb[IFLA_LINKMODE]) |
3672 | dev->link_mode = nla_get_u8(nla: tb[IFLA_LINKMODE]); |
3673 | if (tb[IFLA_GROUP]) |
3674 | netif_set_group(dev, new_group: nla_get_u32(nla: tb[IFLA_GROUP])); |
3675 | if (tb[IFLA_GSO_MAX_SIZE]) |
3676 | netif_set_gso_max_size(dev, size: nla_get_u32(nla: tb[IFLA_GSO_MAX_SIZE])); |
3677 | if (tb[IFLA_GSO_MAX_SEGS]) |
3678 | netif_set_gso_max_segs(dev, segs: nla_get_u32(nla: tb[IFLA_GSO_MAX_SEGS])); |
3679 | if (tb[IFLA_GRO_MAX_SIZE]) |
3680 | netif_set_gro_max_size(dev, size: nla_get_u32(nla: tb[IFLA_GRO_MAX_SIZE])); |
3681 | if (tb[IFLA_GSO_IPV4_MAX_SIZE]) |
3682 | netif_set_gso_ipv4_max_size(dev, size: nla_get_u32(nla: tb[IFLA_GSO_IPV4_MAX_SIZE])); |
3683 | if (tb[IFLA_GRO_IPV4_MAX_SIZE]) |
3684 | netif_set_gro_ipv4_max_size(dev, size: nla_get_u32(nla: tb[IFLA_GRO_IPV4_MAX_SIZE])); |
3685 | |
3686 | return dev; |
3687 | } |
3688 | EXPORT_SYMBOL(rtnl_create_link); |
3689 | |
3690 | struct rtnl_newlink_tbs { |
3691 | struct nlattr *tb[IFLA_MAX + 1]; |
3692 | struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; |
3693 | struct nlattr *attr[RTNL_MAX_TYPE + 1]; |
3694 | struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; |
3695 | }; |
3696 | |
3697 | static int rtnl_changelink(const struct sk_buff *skb, struct nlmsghdr *nlh, |
3698 | const struct rtnl_link_ops *ops, |
3699 | struct net_device *dev, struct net *tgt_net, |
3700 | struct rtnl_newlink_tbs *tbs, |
3701 | struct nlattr **data, |
3702 | struct netlink_ext_ack *extack) |
3703 | { |
3704 | struct nlattr ** const linkinfo = tbs->linkinfo; |
3705 | struct nlattr ** const tb = tbs->tb; |
3706 | int status = 0; |
3707 | int err; |
3708 | |
3709 | if (nlh->nlmsg_flags & NLM_F_EXCL) |
3710 | return -EEXIST; |
3711 | |
3712 | if (nlh->nlmsg_flags & NLM_F_REPLACE) |
3713 | return -EOPNOTSUPP; |
3714 | |
3715 | if (linkinfo[IFLA_INFO_DATA]) { |
3716 | if (!ops || ops != dev->rtnl_link_ops || !ops->changelink) |
3717 | return -EOPNOTSUPP; |
3718 | |
3719 | err = ops->changelink(dev, tb, data, extack); |
3720 | if (err < 0) |
3721 | return err; |
3722 | |
3723 | status |= DO_SETLINK_NOTIFY; |
3724 | } |
3725 | |
3726 | if (linkinfo[IFLA_INFO_SLAVE_DATA]) { |
3727 | const struct rtnl_link_ops *m_ops = NULL; |
3728 | struct nlattr **slave_data = NULL; |
3729 | struct net_device *master_dev; |
3730 | |
3731 | master_dev = netdev_master_upper_dev_get(dev); |
3732 | if (master_dev) |
3733 | m_ops = master_dev->rtnl_link_ops; |
3734 | |
3735 | if (!m_ops || !m_ops->slave_changelink) |
3736 | return -EOPNOTSUPP; |
3737 | |
3738 | if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) |
3739 | return -EINVAL; |
3740 | |
3741 | if (m_ops->slave_maxtype) { |
3742 | err = nla_parse_nested_deprecated(tb: tbs->slave_attr, |
3743 | maxtype: m_ops->slave_maxtype, |
3744 | nla: linkinfo[IFLA_INFO_SLAVE_DATA], |
3745 | policy: m_ops->slave_policy, extack); |
3746 | if (err < 0) |
3747 | return err; |
3748 | |
3749 | slave_data = tbs->slave_attr; |
3750 | } |
3751 | |
3752 | err = m_ops->slave_changelink(master_dev, dev, tb, slave_data, extack); |
3753 | if (err < 0) |
3754 | return err; |
3755 | |
3756 | status |= DO_SETLINK_NOTIFY; |
3757 | } |
3758 | |
3759 | return do_setlink(skb, dev, tgt_net, ifm: nlmsg_data(nlh), extack, tb, status); |
3760 | } |
3761 | |
3762 | static int rtnl_group_changelink(const struct sk_buff *skb, |
3763 | struct net *net, struct net *tgt_net, |
3764 | int group, struct ifinfomsg *ifm, |
3765 | struct netlink_ext_ack *extack, |
3766 | struct nlattr **tb) |
3767 | { |
3768 | struct net_device *dev, *aux; |
3769 | int err; |
3770 | |
3771 | for_each_netdev_safe(net, dev, aux) { |
3772 | if (dev->group == group) { |
3773 | err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, status: 0); |
3774 | if (err < 0) |
3775 | return err; |
3776 | } |
3777 | } |
3778 | |
3779 | return 0; |
3780 | } |
3781 | |
3782 | static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, |
3783 | const struct rtnl_link_ops *ops, |
3784 | struct net *tgt_net, struct net *link_net, |
3785 | struct net *peer_net, |
3786 | const struct nlmsghdr *nlh, |
3787 | struct nlattr **tb, struct nlattr **data, |
3788 | struct netlink_ext_ack *extack) |
3789 | { |
3790 | unsigned char name_assign_type = NET_NAME_USER; |
3791 | struct rtnl_newlink_params params = { |
3792 | .src_net = sock_net(sk: skb->sk), |
3793 | .link_net = link_net, |
3794 | .peer_net = peer_net, |
3795 | .tb = tb, |
3796 | .data = data, |
3797 | }; |
3798 | u32 portid = NETLINK_CB(skb).portid; |
3799 | struct net_device *dev; |
3800 | char ifname[IFNAMSIZ]; |
3801 | int err; |
3802 | |
3803 | if (!ops->alloc && !ops->setup) |
3804 | return -EOPNOTSUPP; |
3805 | |
3806 | if (tb[IFLA_IFNAME]) { |
3807 | nla_strscpy(dst: ifname, nla: tb[IFLA_IFNAME], IFNAMSIZ); |
3808 | } else { |
3809 | snprintf(buf: ifname, IFNAMSIZ, fmt: "%s%%d" , ops->kind); |
3810 | name_assign_type = NET_NAME_ENUM; |
3811 | } |
3812 | |
3813 | dev = rtnl_create_link(tgt_net, ifname, name_assign_type, ops, tb, |
3814 | extack); |
3815 | if (IS_ERR(ptr: dev)) { |
3816 | err = PTR_ERR(ptr: dev); |
3817 | goto out; |
3818 | } |
3819 | |
3820 | dev->ifindex = ifm->ifi_index; |
3821 | |
3822 | if (ops->newlink) |
3823 | err = ops->newlink(dev, ¶ms, extack); |
3824 | else |
3825 | err = register_netdevice(dev); |
3826 | if (err < 0) { |
3827 | free_netdev(dev); |
3828 | goto out; |
3829 | } |
3830 | |
3831 | netdev_lock_ops(dev); |
3832 | |
3833 | err = rtnl_configure_link(dev, ifm, portid, nlh); |
3834 | if (err < 0) |
3835 | goto out_unregister; |
3836 | if (tb[IFLA_MASTER]) { |
3837 | err = do_set_master(dev, ifindex: nla_get_u32(nla: tb[IFLA_MASTER]), extack); |
3838 | if (err) |
3839 | goto out_unregister; |
3840 | } |
3841 | |
3842 | netdev_unlock_ops(dev); |
3843 | out: |
3844 | return err; |
3845 | out_unregister: |
3846 | netdev_unlock_ops(dev); |
3847 | if (ops->newlink) { |
3848 | LIST_HEAD(list_kill); |
3849 | |
3850 | ops->dellink(dev, &list_kill); |
3851 | unregister_netdevice_many(head: &list_kill); |
3852 | } else { |
3853 | unregister_netdevice(dev); |
3854 | } |
3855 | goto out; |
3856 | } |
3857 | |
3858 | static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops, |
3859 | struct nlattr *tbp[], |
3860 | struct nlattr *data[], |
3861 | struct netlink_ext_ack *extack) |
3862 | { |
3863 | struct nlattr *tb[IFLA_MAX + 1]; |
3864 | int err; |
3865 | |
3866 | if (!data || !data[ops->peer_type]) |
3867 | return rtnl_link_get_net_ifla(tb: tbp); |
3868 | |
3869 | err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack); |
3870 | if (err < 0) |
3871 | return ERR_PTR(error: err); |
3872 | |
3873 | if (ops->validate) { |
3874 | err = ops->validate(tb, NULL, extack); |
3875 | if (err < 0) |
3876 | return ERR_PTR(error: err); |
3877 | } |
3878 | |
3879 | return rtnl_link_get_net_ifla(tb); |
3880 | } |
3881 | |
3882 | static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, |
3883 | const struct rtnl_link_ops *ops, |
3884 | struct net *tgt_net, struct net *link_net, |
3885 | struct net *peer_net, |
3886 | struct rtnl_newlink_tbs *tbs, |
3887 | struct nlattr **data, |
3888 | struct netlink_ext_ack *extack) |
3889 | { |
3890 | struct nlattr ** const tb = tbs->tb; |
3891 | struct net *net = sock_net(sk: skb->sk); |
3892 | struct net *device_net; |
3893 | struct net_device *dev; |
3894 | struct ifinfomsg *ifm; |
3895 | bool link_specified; |
3896 | |
3897 | /* When creating, lookup for existing device in target net namespace */ |
3898 | device_net = (nlh->nlmsg_flags & NLM_F_CREATE) && |
3899 | (nlh->nlmsg_flags & NLM_F_EXCL) ? |
3900 | tgt_net : net; |
3901 | |
3902 | ifm = nlmsg_data(nlh); |
3903 | if (ifm->ifi_index > 0) { |
3904 | link_specified = true; |
3905 | dev = __dev_get_by_index(net: device_net, ifindex: ifm->ifi_index); |
3906 | } else if (ifm->ifi_index < 0) { |
3907 | NL_SET_ERR_MSG(extack, "ifindex can't be negative" ); |
3908 | return -EINVAL; |
3909 | } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { |
3910 | link_specified = true; |
3911 | dev = rtnl_dev_get(net: device_net, tb); |
3912 | } else { |
3913 | link_specified = false; |
3914 | dev = NULL; |
3915 | } |
3916 | |
3917 | if (dev) |
3918 | return rtnl_changelink(skb, nlh, ops, dev, tgt_net, tbs, data, extack); |
3919 | |
3920 | if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { |
3921 | /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, |
3922 | * or it's for a group |
3923 | */ |
3924 | if (link_specified || !tb[IFLA_GROUP]) |
3925 | return -ENODEV; |
3926 | |
3927 | return rtnl_group_changelink(skb, net, tgt_net, |
3928 | group: nla_get_u32(nla: tb[IFLA_GROUP]), |
3929 | ifm, extack, tb); |
3930 | } |
3931 | |
3932 | if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) |
3933 | return -EOPNOTSUPP; |
3934 | |
3935 | if (!ops) { |
3936 | NL_SET_ERR_MSG(extack, "Unknown device type" ); |
3937 | return -EOPNOTSUPP; |
3938 | } |
3939 | |
3940 | return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, peer_net, nlh, |
3941 | tb, data, extack); |
3942 | } |
3943 | |
3944 | static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, |
3945 | struct netlink_ext_ack *extack) |
3946 | { |
3947 | struct net *tgt_net, *link_net = NULL, *peer_net = NULL; |
3948 | struct nlattr **tb, **linkinfo, **data = NULL; |
3949 | struct rtnl_link_ops *ops = NULL; |
3950 | struct rtnl_newlink_tbs *tbs; |
3951 | struct rtnl_nets rtnl_nets; |
3952 | int ops_srcu_index; |
3953 | int ret; |
3954 | |
3955 | tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); |
3956 | if (!tbs) |
3957 | return -ENOMEM; |
3958 | |
3959 | tb = tbs->tb; |
3960 | ret = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(struct ifinfomsg), tb, |
3961 | IFLA_MAX, policy: ifla_policy, extack); |
3962 | if (ret < 0) |
3963 | goto free; |
3964 | |
3965 | ret = rtnl_ensure_unique_netns(tb, extack, netns_id_only: false); |
3966 | if (ret < 0) |
3967 | goto free; |
3968 | |
3969 | linkinfo = tbs->linkinfo; |
3970 | if (tb[IFLA_LINKINFO]) { |
3971 | ret = nla_parse_nested_deprecated(tb: linkinfo, IFLA_INFO_MAX, |
3972 | nla: tb[IFLA_LINKINFO], |
3973 | policy: ifla_info_policy, NULL); |
3974 | if (ret < 0) |
3975 | goto free; |
3976 | } else { |
3977 | memset(linkinfo, 0, sizeof(tbs->linkinfo)); |
3978 | } |
3979 | |
3980 | if (linkinfo[IFLA_INFO_KIND]) { |
3981 | char kind[MODULE_NAME_LEN]; |
3982 | |
3983 | nla_strscpy(dst: kind, nla: linkinfo[IFLA_INFO_KIND], dstsize: sizeof(kind)); |
3984 | ops = rtnl_link_ops_get(kind, srcu_index: &ops_srcu_index); |
3985 | #ifdef CONFIG_MODULES |
3986 | if (!ops) { |
3987 | request_module("rtnl-link-%s" , kind); |
3988 | ops = rtnl_link_ops_get(kind, srcu_index: &ops_srcu_index); |
3989 | } |
3990 | #endif |
3991 | } |
3992 | |
3993 | rtnl_nets_init(rtnl_nets: &rtnl_nets); |
3994 | |
3995 | if (ops) { |
3996 | if (ops->maxtype > RTNL_MAX_TYPE) { |
3997 | ret = -EINVAL; |
3998 | goto put_ops; |
3999 | } |
4000 | |
4001 | if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { |
4002 | ret = nla_parse_nested_deprecated(tb: tbs->attr, maxtype: ops->maxtype, |
4003 | nla: linkinfo[IFLA_INFO_DATA], |
4004 | policy: ops->policy, extack); |
4005 | if (ret < 0) |
4006 | goto put_ops; |
4007 | |
4008 | data = tbs->attr; |
4009 | } |
4010 | |
4011 | if (ops->validate) { |
4012 | ret = ops->validate(tb, data, extack); |
4013 | if (ret < 0) |
4014 | goto put_ops; |
4015 | } |
4016 | |
4017 | if (ops->peer_type) { |
4018 | peer_net = rtnl_get_peer_net(ops, tbp: tb, data, extack); |
4019 | if (IS_ERR(ptr: peer_net)) { |
4020 | ret = PTR_ERR(ptr: peer_net); |
4021 | goto put_ops; |
4022 | } |
4023 | if (peer_net) |
4024 | rtnl_nets_add(rtnl_nets: &rtnl_nets, net: peer_net); |
4025 | } |
4026 | } |
4027 | |
4028 | tgt_net = rtnl_link_get_net_capable(skb, src_net: sock_net(sk: skb->sk), tb, CAP_NET_ADMIN); |
4029 | if (IS_ERR(ptr: tgt_net)) { |
4030 | ret = PTR_ERR(ptr: tgt_net); |
4031 | goto put_net; |
4032 | } |
4033 | |
4034 | rtnl_nets_add(rtnl_nets: &rtnl_nets, net: tgt_net); |
4035 | |
4036 | if (tb[IFLA_LINK_NETNSID]) { |
4037 | int id = nla_get_s32(nla: tb[IFLA_LINK_NETNSID]); |
4038 | |
4039 | link_net = get_net_ns_by_id(net: tgt_net, id); |
4040 | if (!link_net) { |
4041 | NL_SET_ERR_MSG(extack, "Unknown network namespace id" ); |
4042 | ret = -EINVAL; |
4043 | goto put_net; |
4044 | } |
4045 | |
4046 | rtnl_nets_add(rtnl_nets: &rtnl_nets, net: link_net); |
4047 | |
4048 | if (!netlink_ns_capable(skb, ns: link_net->user_ns, CAP_NET_ADMIN)) { |
4049 | ret = -EPERM; |
4050 | goto put_net; |
4051 | } |
4052 | } |
4053 | |
4054 | rtnl_nets_lock(rtnl_nets: &rtnl_nets); |
4055 | ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, peer_net, tbs, data, extack); |
4056 | rtnl_nets_unlock(rtnl_nets: &rtnl_nets); |
4057 | |
4058 | put_net: |
4059 | rtnl_nets_destroy(rtnl_nets: &rtnl_nets); |
4060 | put_ops: |
4061 | if (ops) |
4062 | rtnl_link_ops_put(ops, srcu_index: ops_srcu_index); |
4063 | free: |
4064 | kfree(objp: tbs); |
4065 | return ret; |
4066 | } |
4067 | |
4068 | static int rtnl_valid_getlink_req(struct sk_buff *skb, |
4069 | const struct nlmsghdr *nlh, |
4070 | struct nlattr **tb, |
4071 | struct netlink_ext_ack *extack) |
4072 | { |
4073 | struct ifinfomsg *ifm; |
4074 | int i, err; |
4075 | |
4076 | ifm = nlmsg_payload(nlh, len: sizeof(*ifm)); |
4077 | if (!ifm) { |
4078 | NL_SET_ERR_MSG(extack, "Invalid header for get link" ); |
4079 | return -EINVAL; |
4080 | } |
4081 | |
4082 | if (!netlink_strict_get_check(skb)) |
4083 | return nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*ifm), tb, IFLA_MAX, |
4084 | policy: ifla_policy, extack); |
4085 | |
4086 | if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || |
4087 | ifm->ifi_change) { |
4088 | NL_SET_ERR_MSG(extack, "Invalid values in header for get link request" ); |
4089 | return -EINVAL; |
4090 | } |
4091 | |
4092 | err = nlmsg_parse_deprecated_strict(nlh, hdrlen: sizeof(*ifm), tb, IFLA_MAX, |
4093 | policy: ifla_policy, extack); |
4094 | if (err) |
4095 | return err; |
4096 | |
4097 | for (i = 0; i <= IFLA_MAX; i++) { |
4098 | if (!tb[i]) |
4099 | continue; |
4100 | |
4101 | switch (i) { |
4102 | case IFLA_IFNAME: |
4103 | case IFLA_ALT_IFNAME: |
4104 | case IFLA_EXT_MASK: |
4105 | case IFLA_TARGET_NETNSID: |
4106 | break; |
4107 | default: |
4108 | NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request" ); |
4109 | return -EINVAL; |
4110 | } |
4111 | } |
4112 | |
4113 | return 0; |
4114 | } |
4115 | |
4116 | static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, |
4117 | struct netlink_ext_ack *extack) |
4118 | { |
4119 | struct net *net = sock_net(sk: skb->sk); |
4120 | struct net *tgt_net = net; |
4121 | struct ifinfomsg *ifm; |
4122 | struct nlattr *tb[IFLA_MAX+1]; |
4123 | struct net_device *dev = NULL; |
4124 | struct sk_buff *nskb; |
4125 | int netnsid = -1; |
4126 | int err; |
4127 | u32 ext_filter_mask = 0; |
4128 | |
4129 | err = rtnl_valid_getlink_req(skb, nlh, tb, extack); |
4130 | if (err < 0) |
4131 | return err; |
4132 | |
4133 | err = rtnl_ensure_unique_netns(tb, extack, netns_id_only: true); |
4134 | if (err < 0) |
4135 | return err; |
4136 | |
4137 | if (tb[IFLA_TARGET_NETNSID]) { |
4138 | netnsid = nla_get_s32(nla: tb[IFLA_TARGET_NETNSID]); |
4139 | tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); |
4140 | if (IS_ERR(ptr: tgt_net)) |
4141 | return PTR_ERR(ptr: tgt_net); |
4142 | } |
4143 | |
4144 | if (tb[IFLA_EXT_MASK]) |
4145 | ext_filter_mask = nla_get_u32(nla: tb[IFLA_EXT_MASK]); |
4146 | |
4147 | err = -EINVAL; |
4148 | ifm = nlmsg_data(nlh); |
4149 | if (ifm->ifi_index > 0) |
4150 | dev = __dev_get_by_index(net: tgt_net, ifindex: ifm->ifi_index); |
4151 | else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) |
4152 | dev = rtnl_dev_get(net: tgt_net, tb); |
4153 | else |
4154 | goto out; |
4155 | |
4156 | err = -ENODEV; |
4157 | if (dev == NULL) |
4158 | goto out; |
4159 | |
4160 | err = -ENOBUFS; |
4161 | nskb = nlmsg_new_large(payload: if_nlmsg_size(dev, ext_filter_mask)); |
4162 | if (nskb == NULL) |
4163 | goto out; |
4164 | |
4165 | /* Synchronize the carrier state so we don't report a state |
4166 | * that we're not actually going to honour immediately; if |
4167 | * the driver just did a carrier off->on transition, we can |
4168 | * only TX if link watch work has run, but without this we'd |
4169 | * already report carrier on, even if it doesn't work yet. |
4170 | */ |
4171 | linkwatch_sync_dev(dev); |
4172 | |
4173 | err = rtnl_fill_ifinfo(skb: nskb, dev, src_net: net, |
4174 | RTM_NEWLINK, NETLINK_CB(skb).portid, |
4175 | seq: nlh->nlmsg_seq, change: 0, flags: 0, ext_filter_mask, |
4176 | event: 0, NULL, new_ifindex: 0, tgt_netnsid: netnsid, GFP_KERNEL); |
4177 | if (err < 0) { |
4178 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ |
4179 | WARN_ON(err == -EMSGSIZE); |
4180 | kfree_skb(skb: nskb); |
4181 | } else |
4182 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); |
4183 | out: |
4184 | if (netnsid >= 0) |
4185 | put_net(net: tgt_net); |
4186 | |
4187 | return err; |
4188 | } |
4189 | |
4190 | static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, |
4191 | bool *changed, struct netlink_ext_ack *extack) |
4192 | { |
4193 | char *alt_ifname; |
4194 | size_t size; |
4195 | int err; |
4196 | |
4197 | err = nla_validate(head: attr, len: attr->nla_len, IFLA_MAX, policy: ifla_policy, extack); |
4198 | if (err) |
4199 | return err; |
4200 | |
4201 | if (cmd == RTM_NEWLINKPROP) { |
4202 | size = rtnl_prop_list_size(dev); |
4203 | size += nla_total_size(ALTIFNAMSIZ); |
4204 | if (size >= U16_MAX) { |
4205 | NL_SET_ERR_MSG(extack, |
4206 | "effective property list too long" ); |
4207 | return -EINVAL; |
4208 | } |
4209 | } |
4210 | |
4211 | alt_ifname = nla_strdup(nla: attr, GFP_KERNEL_ACCOUNT); |
4212 | if (!alt_ifname) |
4213 | return -ENOMEM; |
4214 | |
4215 | if (cmd == RTM_NEWLINKPROP) { |
4216 | err = netdev_name_node_alt_create(dev, name: alt_ifname); |
4217 | if (!err) |
4218 | alt_ifname = NULL; |
4219 | } else if (cmd == RTM_DELLINKPROP) { |
4220 | err = netdev_name_node_alt_destroy(dev, name: alt_ifname); |
4221 | } else { |
4222 | WARN_ON_ONCE(1); |
4223 | err = -EINVAL; |
4224 | } |
4225 | |
4226 | kfree(objp: alt_ifname); |
4227 | if (!err) |
4228 | *changed = true; |
4229 | return err; |
4230 | } |
4231 | |
4232 | static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, |
4233 | struct netlink_ext_ack *extack) |
4234 | { |
4235 | struct net *net = sock_net(sk: skb->sk); |
4236 | struct nlattr *tb[IFLA_MAX + 1]; |
4237 | struct net_device *dev; |
4238 | struct ifinfomsg *ifm; |
4239 | bool changed = false; |
4240 | struct nlattr *attr; |
4241 | int err, rem; |
4242 | |
4243 | err = nlmsg_parse(nlh, hdrlen: sizeof(*ifm), tb, IFLA_MAX, policy: ifla_policy, extack); |
4244 | if (err) |
4245 | return err; |
4246 | |
4247 | err = rtnl_ensure_unique_netns(tb, extack, netns_id_only: true); |
4248 | if (err) |
4249 | return err; |
4250 | |
4251 | ifm = nlmsg_data(nlh); |
4252 | if (ifm->ifi_index > 0) |
4253 | dev = __dev_get_by_index(net, ifindex: ifm->ifi_index); |
4254 | else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) |
4255 | dev = rtnl_dev_get(net, tb); |
4256 | else |
4257 | return -EINVAL; |
4258 | |
4259 | if (!dev) |
4260 | return -ENODEV; |
4261 | |
4262 | if (!tb[IFLA_PROP_LIST]) |
4263 | return 0; |
4264 | |
4265 | nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { |
4266 | switch (nla_type(nla: attr)) { |
4267 | case IFLA_ALT_IFNAME: |
4268 | err = rtnl_alt_ifname(cmd, dev, attr, changed: &changed, extack); |
4269 | if (err) |
4270 | return err; |
4271 | break; |
4272 | } |
4273 | } |
4274 | |
4275 | if (changed) |
4276 | netdev_state_change(dev); |
4277 | return 0; |
4278 | } |
4279 | |
4280 | static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, |
4281 | struct netlink_ext_ack *extack) |
4282 | { |
4283 | return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); |
4284 | } |
4285 | |
4286 | static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, |
4287 | struct netlink_ext_ack *extack) |
4288 | { |
4289 | return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); |
4290 | } |
4291 | |
4292 | static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb, |
4293 | struct nlmsghdr *nlh) |
4294 | { |
4295 | struct net *net = sock_net(sk: skb->sk); |
4296 | size_t min_ifinfo_dump_size = 0; |
4297 | u32 ext_filter_mask = 0; |
4298 | struct net_device *dev; |
4299 | struct nlattr *nla; |
4300 | int hdrlen, rem; |
4301 | |
4302 | /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ |
4303 | hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? |
4304 | sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); |
4305 | |
4306 | if (nlh->nlmsg_len < nlmsg_msg_size(payload: hdrlen)) |
4307 | return NLMSG_GOODSIZE; |
4308 | |
4309 | nla_for_each_attr_type(nla, IFLA_EXT_MASK, |
4310 | nlmsg_attrdata(nlh, hdrlen), |
4311 | nlmsg_attrlen(nlh, hdrlen), rem) { |
4312 | if (nla_len(nla) == sizeof(u32)) |
4313 | ext_filter_mask = nla_get_u32(nla); |
4314 | } |
4315 | |
4316 | if (!ext_filter_mask) |
4317 | return NLMSG_GOODSIZE; |
4318 | /* |
4319 | * traverse the list of net devices and compute the minimum |
4320 | * buffer size based upon the filter mask. |
4321 | */ |
4322 | rcu_read_lock(); |
4323 | for_each_netdev_rcu(net, dev) { |
4324 | min_ifinfo_dump_size = max(min_ifinfo_dump_size, |
4325 | if_nlmsg_size(dev, ext_filter_mask)); |
4326 | } |
4327 | rcu_read_unlock(); |
4328 | |
4329 | return nlmsg_total_size(payload: min_ifinfo_dump_size); |
4330 | } |
4331 | |
4332 | static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) |
4333 | { |
4334 | int idx; |
4335 | int s_idx = cb->family; |
4336 | int type = cb->nlh->nlmsg_type - RTM_BASE; |
4337 | int ret = 0; |
4338 | |
4339 | if (s_idx == 0) |
4340 | s_idx = 1; |
4341 | |
4342 | for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { |
4343 | struct rtnl_link __rcu **tab; |
4344 | struct rtnl_link *link; |
4345 | rtnl_dumpit_func dumpit; |
4346 | |
4347 | if (idx < s_idx || idx == PF_PACKET) |
4348 | continue; |
4349 | |
4350 | if (type < 0 || type >= RTM_NR_MSGTYPES) |
4351 | continue; |
4352 | |
4353 | tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); |
4354 | if (!tab) |
4355 | continue; |
4356 | |
4357 | link = rcu_dereference_rtnl(tab[type]); |
4358 | if (!link) |
4359 | continue; |
4360 | |
4361 | dumpit = link->dumpit; |
4362 | if (!dumpit) |
4363 | continue; |
4364 | |
4365 | if (idx > s_idx) { |
4366 | memset(&cb->args[0], 0, sizeof(cb->args)); |
4367 | cb->prev_seq = 0; |
4368 | cb->seq = 0; |
4369 | } |
4370 | ret = dumpit(skb, cb); |
4371 | if (ret) |
4372 | break; |
4373 | } |
4374 | cb->family = idx; |
4375 | |
4376 | return skb->len ? : ret; |
4377 | } |
4378 | |
4379 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, |
4380 | unsigned int change, |
4381 | u32 event, gfp_t flags, int *new_nsid, |
4382 | int new_ifindex, u32 portid, |
4383 | const struct nlmsghdr *nlh) |
4384 | { |
4385 | struct net *net = dev_net(dev); |
4386 | struct sk_buff *skb; |
4387 | int err = -ENOBUFS; |
4388 | u32 seq = 0; |
4389 | |
4390 | skb = nlmsg_new(payload: if_nlmsg_size(dev, ext_filter_mask: 0), flags); |
4391 | if (skb == NULL) |
4392 | goto errout; |
4393 | |
4394 | if (nlmsg_report(nlh)) |
4395 | seq = nlmsg_seq(nlh); |
4396 | else |
4397 | portid = 0; |
4398 | |
4399 | err = rtnl_fill_ifinfo(skb, dev, src_net: dev_net(dev), |
4400 | type, pid: portid, seq, change, flags: 0, ext_filter_mask: 0, event, |
4401 | new_nsid, new_ifindex, tgt_netnsid: -1, gfp: flags); |
4402 | if (err < 0) { |
4403 | /* -EMSGSIZE implies BUG in if_nlmsg_size() */ |
4404 | WARN_ON(err == -EMSGSIZE); |
4405 | kfree_skb(skb); |
4406 | goto errout; |
4407 | } |
4408 | return skb; |
4409 | errout: |
4410 | rtnl_set_sk_err(net, RTNLGRP_LINK, err); |
4411 | return NULL; |
4412 | } |
4413 | |
4414 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, |
4415 | u32 portid, const struct nlmsghdr *nlh) |
4416 | { |
4417 | struct net *net = dev_net(dev); |
4418 | |
4419 | rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); |
4420 | } |
4421 | |
4422 | static void rtmsg_ifinfo_event(int type, struct net_device *dev, |
4423 | unsigned int change, u32 event, |
4424 | gfp_t flags, int *new_nsid, int new_ifindex, |
4425 | u32 portid, const struct nlmsghdr *nlh) |
4426 | { |
4427 | struct sk_buff *skb; |
4428 | |
4429 | if (dev->reg_state != NETREG_REGISTERED) |
4430 | return; |
4431 | |
4432 | skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, |
4433 | new_ifindex, portid, nlh); |
4434 | if (skb) |
4435 | rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); |
4436 | } |
4437 | |
4438 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, |
4439 | gfp_t flags, u32 portid, const struct nlmsghdr *nlh) |
4440 | { |
4441 | rtmsg_ifinfo_event(type, dev, change, event: rtnl_get_event(event: 0), flags, |
4442 | NULL, new_ifindex: 0, portid, nlh); |
4443 | } |
4444 | |
4445 | void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, |
4446 | gfp_t flags, int *new_nsid, int new_ifindex) |
4447 | { |
4448 | rtmsg_ifinfo_event(type, dev, change, event: rtnl_get_event(event: 0), flags, |
4449 | new_nsid, new_ifindex, portid: 0, NULL); |
4450 | } |
4451 | |
4452 | static int nlmsg_populate_fdb_fill(struct sk_buff *skb, |
4453 | struct net_device *dev, |
4454 | u8 *addr, u16 vid, u32 pid, u32 seq, |
4455 | int type, unsigned int flags, |
4456 | int nlflags, u16 ndm_state) |
4457 | { |
4458 | struct nlmsghdr *nlh; |
4459 | struct ndmsg *ndm; |
4460 | |
4461 | nlh = nlmsg_put(skb, portid: pid, seq, type, payload: sizeof(*ndm), flags: nlflags); |
4462 | if (!nlh) |
4463 | return -EMSGSIZE; |
4464 | |
4465 | ndm = nlmsg_data(nlh); |
4466 | ndm->ndm_family = AF_BRIDGE; |
4467 | ndm->ndm_pad1 = 0; |
4468 | ndm->ndm_pad2 = 0; |
4469 | ndm->ndm_flags = flags; |
4470 | ndm->ndm_type = 0; |
4471 | ndm->ndm_ifindex = dev->ifindex; |
4472 | ndm->ndm_state = ndm_state; |
4473 | |
4474 | if (nla_put(skb, attrtype: NDA_LLADDR, attrlen: dev->addr_len, data: addr)) |
4475 | goto nla_put_failure; |
4476 | if (vid) |
4477 | if (nla_put(skb, attrtype: NDA_VLAN, attrlen: sizeof(u16), data: &vid)) |
4478 | goto nla_put_failure; |
4479 | |
4480 | nlmsg_end(skb, nlh); |
4481 | return 0; |
4482 | |
4483 | nla_put_failure: |
4484 | nlmsg_cancel(skb, nlh); |
4485 | return -EMSGSIZE; |
4486 | } |
4487 | |
4488 | static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) |
4489 | { |
4490 | return NLMSG_ALIGN(sizeof(struct ndmsg)) + |
4491 | nla_total_size(payload: dev->addr_len) + /* NDA_LLADDR */ |
4492 | nla_total_size(payload: sizeof(u16)) + /* NDA_VLAN */ |
4493 | 0; |
4494 | } |
4495 | |
4496 | static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, |
4497 | u16 ndm_state) |
4498 | { |
4499 | struct net *net = dev_net(dev); |
4500 | struct sk_buff *skb; |
4501 | int err = -ENOBUFS; |
4502 | |
4503 | skb = nlmsg_new(payload: rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); |
4504 | if (!skb) |
4505 | goto errout; |
4506 | |
4507 | err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, |
4508 | pid: 0, seq: 0, type, NTF_SELF, nlflags: 0, ndm_state); |
4509 | if (err < 0) { |
4510 | kfree_skb(skb); |
4511 | goto errout; |
4512 | } |
4513 | |
4514 | rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); |
4515 | return; |
4516 | errout: |
4517 | rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); |
4518 | } |
4519 | |
4520 | /* |
4521 | * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry |
4522 | */ |
4523 | int ndo_dflt_fdb_add(struct ndmsg *ndm, |
4524 | struct nlattr *tb[], |
4525 | struct net_device *dev, |
4526 | const unsigned char *addr, u16 vid, |
4527 | u16 flags) |
4528 | { |
4529 | int err = -EINVAL; |
4530 | |
4531 | /* If aging addresses are supported device will need to |
4532 | * implement its own handler for this. |
4533 | */ |
4534 | if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { |
4535 | netdev_info(dev, format: "default FDB implementation only supports local addresses\n" ); |
4536 | return err; |
4537 | } |
4538 | |
4539 | if (tb[NDA_FLAGS_EXT]) { |
4540 | netdev_info(dev, format: "invalid flags given to default FDB implementation\n" ); |
4541 | return err; |
4542 | } |
4543 | |
4544 | if (vid) { |
4545 | netdev_info(dev, format: "vlans aren't supported yet for dev_uc|mc_add()\n" ); |
4546 | return err; |
4547 | } |
4548 | |
4549 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) |
4550 | err = dev_uc_add_excl(dev, addr); |
4551 | else if (is_multicast_ether_addr(addr)) |
4552 | err = dev_mc_add_excl(dev, addr); |
4553 | |
4554 | /* Only return duplicate errors if NLM_F_EXCL is set */ |
4555 | if (err == -EEXIST && !(flags & NLM_F_EXCL)) |
4556 | err = 0; |
4557 | |
4558 | return err; |
4559 | } |
4560 | EXPORT_SYMBOL(ndo_dflt_fdb_add); |
4561 | |
4562 | static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, |
4563 | struct netlink_ext_ack *extack) |
4564 | { |
4565 | u16 vid = 0; |
4566 | |
4567 | if (vlan_attr) { |
4568 | if (nla_len(nla: vlan_attr) != sizeof(u16)) { |
4569 | NL_SET_ERR_MSG(extack, "invalid vlan attribute size" ); |
4570 | return -EINVAL; |
4571 | } |
4572 | |
4573 | vid = nla_get_u16(nla: vlan_attr); |
4574 | |
4575 | if (!vid || vid >= VLAN_VID_MASK) { |
4576 | NL_SET_ERR_MSG(extack, "invalid vlan id" ); |
4577 | return -EINVAL; |
4578 | } |
4579 | } |
4580 | *p_vid = vid; |
4581 | return 0; |
4582 | } |
4583 | |
4584 | static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, |
4585 | struct netlink_ext_ack *extack) |
4586 | { |
4587 | struct net *net = sock_net(sk: skb->sk); |
4588 | struct ndmsg *ndm; |
4589 | struct nlattr *tb[NDA_MAX+1]; |
4590 | struct net_device *dev; |
4591 | u8 *addr; |
4592 | u16 vid; |
4593 | int err; |
4594 | |
4595 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*ndm), tb, NDA_MAX, NULL, |
4596 | extack); |
4597 | if (err < 0) |
4598 | return err; |
4599 | |
4600 | ndm = nlmsg_data(nlh); |
4601 | if (ndm->ndm_ifindex == 0) { |
4602 | NL_SET_ERR_MSG(extack, "invalid ifindex" ); |
4603 | return -EINVAL; |
4604 | } |
4605 | |
4606 | dev = __dev_get_by_index(net, ifindex: ndm->ndm_ifindex); |
4607 | if (dev == NULL) { |
4608 | NL_SET_ERR_MSG(extack, "unknown ifindex" ); |
4609 | return -ENODEV; |
4610 | } |
4611 | |
4612 | if (!tb[NDA_LLADDR] || nla_len(nla: tb[NDA_LLADDR]) != ETH_ALEN) { |
4613 | NL_SET_ERR_MSG(extack, "invalid address" ); |
4614 | return -EINVAL; |
4615 | } |
4616 | |
4617 | if (dev->type != ARPHRD_ETHER) { |
4618 | NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices" ); |
4619 | return -EINVAL; |
4620 | } |
4621 | |
4622 | addr = nla_data(nla: tb[NDA_LLADDR]); |
4623 | |
4624 | err = fdb_vid_parse(vlan_attr: tb[NDA_VLAN], p_vid: &vid, extack); |
4625 | if (err) |
4626 | return err; |
4627 | |
4628 | err = -EOPNOTSUPP; |
4629 | |
4630 | /* Support fdb on master device the net/bridge default case */ |
4631 | if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && |
4632 | netif_is_bridge_port(dev)) { |
4633 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
4634 | const struct net_device_ops *ops = br_dev->netdev_ops; |
4635 | bool notified = false; |
4636 | |
4637 | err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, |
4638 | nlh->nlmsg_flags, ¬ified, extack); |
4639 | if (err) |
4640 | goto out; |
4641 | else |
4642 | ndm->ndm_flags &= ~NTF_MASTER; |
4643 | } |
4644 | |
4645 | /* Embedded bridge, macvlan, and any other device support */ |
4646 | if ((ndm->ndm_flags & NTF_SELF)) { |
4647 | bool notified = false; |
4648 | |
4649 | if (dev->netdev_ops->ndo_fdb_add) |
4650 | err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, |
4651 | vid, |
4652 | nlh->nlmsg_flags, |
4653 | ¬ified, extack); |
4654 | else |
4655 | err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, |
4656 | nlh->nlmsg_flags); |
4657 | |
4658 | if (!err && !notified) { |
4659 | rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, |
4660 | ndm_state: ndm->ndm_state); |
4661 | ndm->ndm_flags &= ~NTF_SELF; |
4662 | } |
4663 | } |
4664 | out: |
4665 | return err; |
4666 | } |
4667 | |
4668 | /* |
4669 | * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry |
4670 | */ |
4671 | int ndo_dflt_fdb_del(struct ndmsg *ndm, |
4672 | struct nlattr *tb[], |
4673 | struct net_device *dev, |
4674 | const unsigned char *addr, u16 vid) |
4675 | { |
4676 | int err = -EINVAL; |
4677 | |
4678 | /* If aging addresses are supported device will need to |
4679 | * implement its own handler for this. |
4680 | */ |
4681 | if (!(ndm->ndm_state & NUD_PERMANENT)) { |
4682 | netdev_info(dev, format: "default FDB implementation only supports local addresses\n" ); |
4683 | return err; |
4684 | } |
4685 | |
4686 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) |
4687 | err = dev_uc_del(dev, addr); |
4688 | else if (is_multicast_ether_addr(addr)) |
4689 | err = dev_mc_del(dev, addr); |
4690 | |
4691 | return err; |
4692 | } |
4693 | EXPORT_SYMBOL(ndo_dflt_fdb_del); |
4694 | |
4695 | static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, |
4696 | struct netlink_ext_ack *extack) |
4697 | { |
4698 | bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); |
4699 | struct net *net = sock_net(sk: skb->sk); |
4700 | const struct net_device_ops *ops; |
4701 | struct ndmsg *ndm; |
4702 | struct nlattr *tb[NDA_MAX+1]; |
4703 | struct net_device *dev; |
4704 | __u8 *addr = NULL; |
4705 | int err; |
4706 | u16 vid; |
4707 | |
4708 | if (!netlink_capable(skb, CAP_NET_ADMIN)) |
4709 | return -EPERM; |
4710 | |
4711 | if (!del_bulk) { |
4712 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*ndm), tb, NDA_MAX, |
4713 | NULL, extack); |
4714 | } else { |
4715 | /* For bulk delete, the drivers will parse the message with |
4716 | * policy. |
4717 | */ |
4718 | err = nlmsg_parse(nlh, hdrlen: sizeof(*ndm), tb, NDA_MAX, NULL, extack); |
4719 | } |
4720 | if (err < 0) |
4721 | return err; |
4722 | |
4723 | ndm = nlmsg_data(nlh); |
4724 | if (ndm->ndm_ifindex == 0) { |
4725 | NL_SET_ERR_MSG(extack, "invalid ifindex" ); |
4726 | return -EINVAL; |
4727 | } |
4728 | |
4729 | dev = __dev_get_by_index(net, ifindex: ndm->ndm_ifindex); |
4730 | if (dev == NULL) { |
4731 | NL_SET_ERR_MSG(extack, "unknown ifindex" ); |
4732 | return -ENODEV; |
4733 | } |
4734 | |
4735 | if (!del_bulk) { |
4736 | if (!tb[NDA_LLADDR] || nla_len(nla: tb[NDA_LLADDR]) != ETH_ALEN) { |
4737 | NL_SET_ERR_MSG(extack, "invalid address" ); |
4738 | return -EINVAL; |
4739 | } |
4740 | addr = nla_data(nla: tb[NDA_LLADDR]); |
4741 | |
4742 | err = fdb_vid_parse(vlan_attr: tb[NDA_VLAN], p_vid: &vid, extack); |
4743 | if (err) |
4744 | return err; |
4745 | } |
4746 | |
4747 | if (dev->type != ARPHRD_ETHER) { |
4748 | NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices" ); |
4749 | return -EINVAL; |
4750 | } |
4751 | |
4752 | err = -EOPNOTSUPP; |
4753 | |
4754 | /* Support fdb on master device the net/bridge default case */ |
4755 | if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && |
4756 | netif_is_bridge_port(dev)) { |
4757 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
4758 | bool notified = false; |
4759 | |
4760 | ops = br_dev->netdev_ops; |
4761 | if (!del_bulk) { |
4762 | if (ops->ndo_fdb_del) |
4763 | err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, |
4764 | ¬ified, extack); |
4765 | } else { |
4766 | if (ops->ndo_fdb_del_bulk) |
4767 | err = ops->ndo_fdb_del_bulk(nlh, dev, extack); |
4768 | } |
4769 | |
4770 | if (err) |
4771 | goto out; |
4772 | else |
4773 | ndm->ndm_flags &= ~NTF_MASTER; |
4774 | } |
4775 | |
4776 | /* Embedded bridge, macvlan, and any other device support */ |
4777 | if (ndm->ndm_flags & NTF_SELF) { |
4778 | bool notified = false; |
4779 | |
4780 | ops = dev->netdev_ops; |
4781 | if (!del_bulk) { |
4782 | if (ops->ndo_fdb_del) |
4783 | err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, |
4784 | ¬ified, extack); |
4785 | else |
4786 | err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); |
4787 | } else { |
4788 | /* in case err was cleared by NTF_MASTER call */ |
4789 | err = -EOPNOTSUPP; |
4790 | if (ops->ndo_fdb_del_bulk) |
4791 | err = ops->ndo_fdb_del_bulk(nlh, dev, extack); |
4792 | } |
4793 | |
4794 | if (!err) { |
4795 | if (!del_bulk && !notified) |
4796 | rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, |
4797 | ndm_state: ndm->ndm_state); |
4798 | ndm->ndm_flags &= ~NTF_SELF; |
4799 | } |
4800 | } |
4801 | out: |
4802 | return err; |
4803 | } |
4804 | |
4805 | static int nlmsg_populate_fdb(struct sk_buff *skb, |
4806 | struct netlink_callback *cb, |
4807 | struct net_device *dev, |
4808 | int *idx, |
4809 | struct netdev_hw_addr_list *list) |
4810 | { |
4811 | struct ndo_fdb_dump_context *ctx = (void *)cb->ctx; |
4812 | struct netdev_hw_addr *ha; |
4813 | u32 portid, seq; |
4814 | int err; |
4815 | |
4816 | portid = NETLINK_CB(cb->skb).portid; |
4817 | seq = cb->nlh->nlmsg_seq; |
4818 | |
4819 | list_for_each_entry(ha, &list->list, list) { |
4820 | if (*idx < ctx->fdb_idx) |
4821 | goto skip; |
4822 | |
4823 | err = nlmsg_populate_fdb_fill(skb, dev, addr: ha->addr, vid: 0, |
4824 | pid: portid, seq, |
4825 | RTM_NEWNEIGH, NTF_SELF, |
4826 | NLM_F_MULTI, NUD_PERMANENT); |
4827 | if (err < 0) |
4828 | return err; |
4829 | skip: |
4830 | *idx += 1; |
4831 | } |
4832 | return 0; |
4833 | } |
4834 | |
4835 | /** |
4836 | * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. |
4837 | * @skb: socket buffer to store message in |
4838 | * @cb: netlink callback |
4839 | * @dev: netdevice |
4840 | * @filter_dev: ignored |
4841 | * @idx: the number of FDB table entries dumped is added to *@idx |
4842 | * |
4843 | * Default netdevice operation to dump the existing unicast address list. |
4844 | * Returns number of addresses from list put in skb. |
4845 | */ |
4846 | int ndo_dflt_fdb_dump(struct sk_buff *skb, |
4847 | struct netlink_callback *cb, |
4848 | struct net_device *dev, |
4849 | struct net_device *filter_dev, |
4850 | int *idx) |
4851 | { |
4852 | int err; |
4853 | |
4854 | if (dev->type != ARPHRD_ETHER) |
4855 | return -EINVAL; |
4856 | |
4857 | netif_addr_lock_bh(dev); |
4858 | err = nlmsg_populate_fdb(skb, cb, dev, idx, list: &dev->uc); |
4859 | if (err) |
4860 | goto out; |
4861 | err = nlmsg_populate_fdb(skb, cb, dev, idx, list: &dev->mc); |
4862 | out: |
4863 | netif_addr_unlock_bh(dev); |
4864 | return err; |
4865 | } |
4866 | EXPORT_SYMBOL(ndo_dflt_fdb_dump); |
4867 | |
4868 | static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, |
4869 | int *br_idx, int *brport_idx, |
4870 | struct netlink_ext_ack *extack) |
4871 | { |
4872 | struct nlattr *tb[NDA_MAX + 1]; |
4873 | struct ndmsg *ndm; |
4874 | int err, i; |
4875 | |
4876 | ndm = nlmsg_payload(nlh, len: sizeof(*ndm)); |
4877 | if (!ndm) { |
4878 | NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request" ); |
4879 | return -EINVAL; |
4880 | } |
4881 | |
4882 | if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || |
4883 | ndm->ndm_flags || ndm->ndm_type) { |
4884 | NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request" ); |
4885 | return -EINVAL; |
4886 | } |
4887 | |
4888 | err = nlmsg_parse_deprecated_strict(nlh, hdrlen: sizeof(struct ndmsg), tb, |
4889 | NDA_MAX, NULL, extack); |
4890 | if (err < 0) |
4891 | return err; |
4892 | |
4893 | *brport_idx = ndm->ndm_ifindex; |
4894 | for (i = 0; i <= NDA_MAX; ++i) { |
4895 | if (!tb[i]) |
4896 | continue; |
4897 | |
4898 | switch (i) { |
4899 | case NDA_IFINDEX: |
4900 | if (nla_len(nla: tb[i]) != sizeof(u32)) { |
4901 | NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request" ); |
4902 | return -EINVAL; |
4903 | } |
4904 | *brport_idx = nla_get_u32(nla: tb[NDA_IFINDEX]); |
4905 | break; |
4906 | case NDA_MASTER: |
4907 | if (nla_len(nla: tb[i]) != sizeof(u32)) { |
4908 | NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request" ); |
4909 | return -EINVAL; |
4910 | } |
4911 | *br_idx = nla_get_u32(nla: tb[NDA_MASTER]); |
4912 | break; |
4913 | default: |
4914 | NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request" ); |
4915 | return -EINVAL; |
4916 | } |
4917 | } |
4918 | |
4919 | return 0; |
4920 | } |
4921 | |
4922 | static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, |
4923 | int *br_idx, int *brport_idx, |
4924 | struct netlink_ext_ack *extack) |
4925 | { |
4926 | struct nlattr *tb[IFLA_MAX+1]; |
4927 | int err; |
4928 | |
4929 | /* A hack to preserve kernel<->userspace interface. |
4930 | * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. |
4931 | * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. |
4932 | * So, check for ndmsg with an optional u32 attribute (not used here). |
4933 | * Fortunately these sizes don't conflict with the size of ifinfomsg |
4934 | * with an optional attribute. |
4935 | */ |
4936 | if (nlmsg_len(nlh) != sizeof(struct ndmsg) && |
4937 | (nlmsg_len(nlh) != sizeof(struct ndmsg) + |
4938 | nla_attr_size(payload: sizeof(u32)))) { |
4939 | struct ifinfomsg *ifm; |
4940 | |
4941 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(struct ifinfomsg), |
4942 | tb, IFLA_MAX, policy: ifla_policy, |
4943 | extack); |
4944 | if (err < 0) { |
4945 | return -EINVAL; |
4946 | } else if (err == 0) { |
4947 | if (tb[IFLA_MASTER]) |
4948 | *br_idx = nla_get_u32(nla: tb[IFLA_MASTER]); |
4949 | } |
4950 | |
4951 | ifm = nlmsg_data(nlh); |
4952 | *brport_idx = ifm->ifi_index; |
4953 | } |
4954 | return 0; |
4955 | } |
4956 | |
4957 | static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) |
4958 | { |
4959 | const struct net_device_ops *ops = NULL, *cops = NULL; |
4960 | struct ndo_fdb_dump_context *ctx = (void *)cb->ctx; |
4961 | struct net_device *dev, *br_dev = NULL; |
4962 | struct net *net = sock_net(sk: skb->sk); |
4963 | int brport_idx = 0; |
4964 | int br_idx = 0; |
4965 | int fidx = 0; |
4966 | int err; |
4967 | |
4968 | NL_ASSERT_CTX_FITS(struct ndo_fdb_dump_context); |
4969 | |
4970 | if (cb->strict_check) |
4971 | err = valid_fdb_dump_strict(nlh: cb->nlh, br_idx: &br_idx, brport_idx: &brport_idx, |
4972 | extack: cb->extack); |
4973 | else |
4974 | err = valid_fdb_dump_legacy(nlh: cb->nlh, br_idx: &br_idx, brport_idx: &brport_idx, |
4975 | extack: cb->extack); |
4976 | if (err < 0) |
4977 | return err; |
4978 | |
4979 | if (br_idx) { |
4980 | br_dev = __dev_get_by_index(net, ifindex: br_idx); |
4981 | if (!br_dev) |
4982 | return -ENODEV; |
4983 | |
4984 | ops = br_dev->netdev_ops; |
4985 | } |
4986 | |
4987 | for_each_netdev_dump(net, dev, ctx->ifindex) { |
4988 | if (brport_idx && (dev->ifindex != brport_idx)) |
4989 | continue; |
4990 | |
4991 | if (!br_idx) { /* user did not specify a specific bridge */ |
4992 | if (netif_is_bridge_port(dev)) { |
4993 | br_dev = netdev_master_upper_dev_get(dev); |
4994 | cops = br_dev->netdev_ops; |
4995 | } |
4996 | } else { |
4997 | if (dev != br_dev && |
4998 | !netif_is_bridge_port(dev)) |
4999 | continue; |
5000 | |
5001 | if (br_dev != netdev_master_upper_dev_get(dev) && |
5002 | !netif_is_bridge_master(dev)) |
5003 | continue; |
5004 | cops = ops; |
5005 | } |
5006 | |
5007 | if (netif_is_bridge_port(dev)) { |
5008 | if (cops && cops->ndo_fdb_dump) { |
5009 | err = cops->ndo_fdb_dump(skb, cb, br_dev, dev, |
5010 | &fidx); |
5011 | if (err == -EMSGSIZE) |
5012 | break; |
5013 | } |
5014 | } |
5015 | |
5016 | if (dev->netdev_ops->ndo_fdb_dump) |
5017 | err = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL, |
5018 | &fidx); |
5019 | else |
5020 | err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, &fidx); |
5021 | if (err == -EMSGSIZE) |
5022 | break; |
5023 | |
5024 | cops = NULL; |
5025 | |
5026 | /* reset fdb offset to 0 for rest of the interfaces */ |
5027 | ctx->fdb_idx = 0; |
5028 | fidx = 0; |
5029 | } |
5030 | |
5031 | ctx->fdb_idx = fidx; |
5032 | |
5033 | return skb->len; |
5034 | } |
5035 | |
5036 | static int valid_fdb_get_strict(const struct nlmsghdr *nlh, |
5037 | struct nlattr **tb, u8 *ndm_flags, |
5038 | int *br_idx, int *brport_idx, u8 **addr, |
5039 | u16 *vid, struct netlink_ext_ack *extack) |
5040 | { |
5041 | struct ndmsg *ndm; |
5042 | int err, i; |
5043 | |
5044 | ndm = nlmsg_payload(nlh, len: sizeof(*ndm)); |
5045 | if (!ndm) { |
5046 | NL_SET_ERR_MSG(extack, "Invalid header for fdb get request" ); |
5047 | return -EINVAL; |
5048 | } |
5049 | |
5050 | if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || |
5051 | ndm->ndm_type) { |
5052 | NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request" ); |
5053 | return -EINVAL; |
5054 | } |
5055 | |
5056 | if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { |
5057 | NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request" ); |
5058 | return -EINVAL; |
5059 | } |
5060 | |
5061 | err = nlmsg_parse_deprecated_strict(nlh, hdrlen: sizeof(struct ndmsg), tb, |
5062 | NDA_MAX, policy: nda_policy, extack); |
5063 | if (err < 0) |
5064 | return err; |
5065 | |
5066 | *ndm_flags = ndm->ndm_flags; |
5067 | *brport_idx = ndm->ndm_ifindex; |
5068 | for (i = 0; i <= NDA_MAX; ++i) { |
5069 | if (!tb[i]) |
5070 | continue; |
5071 | |
5072 | switch (i) { |
5073 | case NDA_MASTER: |
5074 | *br_idx = nla_get_u32(nla: tb[i]); |
5075 | break; |
5076 | case NDA_LLADDR: |
5077 | if (nla_len(nla: tb[i]) != ETH_ALEN) { |
5078 | NL_SET_ERR_MSG(extack, "Invalid address in fdb get request" ); |
5079 | return -EINVAL; |
5080 | } |
5081 | *addr = nla_data(nla: tb[i]); |
5082 | break; |
5083 | case NDA_VLAN: |
5084 | err = fdb_vid_parse(vlan_attr: tb[i], p_vid: vid, extack); |
5085 | if (err) |
5086 | return err; |
5087 | break; |
5088 | case NDA_VNI: |
5089 | break; |
5090 | default: |
5091 | NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request" ); |
5092 | return -EINVAL; |
5093 | } |
5094 | } |
5095 | |
5096 | return 0; |
5097 | } |
5098 | |
5099 | static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, |
5100 | struct netlink_ext_ack *extack) |
5101 | { |
5102 | struct net_device *dev = NULL, *br_dev = NULL; |
5103 | const struct net_device_ops *ops = NULL; |
5104 | struct net *net = sock_net(sk: in_skb->sk); |
5105 | struct nlattr *tb[NDA_MAX + 1]; |
5106 | struct sk_buff *skb; |
5107 | int brport_idx = 0; |
5108 | u8 ndm_flags = 0; |
5109 | int br_idx = 0; |
5110 | u8 *addr = NULL; |
5111 | u16 vid = 0; |
5112 | int err; |
5113 | |
5114 | err = valid_fdb_get_strict(nlh, tb, ndm_flags: &ndm_flags, br_idx: &br_idx, |
5115 | brport_idx: &brport_idx, addr: &addr, vid: &vid, extack); |
5116 | if (err < 0) |
5117 | return err; |
5118 | |
5119 | if (!addr) { |
5120 | NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request" ); |
5121 | return -EINVAL; |
5122 | } |
5123 | |
5124 | if (brport_idx) { |
5125 | dev = __dev_get_by_index(net, ifindex: brport_idx); |
5126 | if (!dev) { |
5127 | NL_SET_ERR_MSG(extack, "Unknown device ifindex" ); |
5128 | return -ENODEV; |
5129 | } |
5130 | } |
5131 | |
5132 | if (br_idx) { |
5133 | if (dev) { |
5134 | NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive" ); |
5135 | return -EINVAL; |
5136 | } |
5137 | |
5138 | br_dev = __dev_get_by_index(net, ifindex: br_idx); |
5139 | if (!br_dev) { |
5140 | NL_SET_ERR_MSG(extack, "Invalid master ifindex" ); |
5141 | return -EINVAL; |
5142 | } |
5143 | ops = br_dev->netdev_ops; |
5144 | } |
5145 | |
5146 | if (dev) { |
5147 | if (!ndm_flags || (ndm_flags & NTF_MASTER)) { |
5148 | if (!netif_is_bridge_port(dev)) { |
5149 | NL_SET_ERR_MSG(extack, "Device is not a bridge port" ); |
5150 | return -EINVAL; |
5151 | } |
5152 | br_dev = netdev_master_upper_dev_get(dev); |
5153 | if (!br_dev) { |
5154 | NL_SET_ERR_MSG(extack, "Master of device not found" ); |
5155 | return -EINVAL; |
5156 | } |
5157 | ops = br_dev->netdev_ops; |
5158 | } else { |
5159 | if (!(ndm_flags & NTF_SELF)) { |
5160 | NL_SET_ERR_MSG(extack, "Missing NTF_SELF" ); |
5161 | return -EINVAL; |
5162 | } |
5163 | ops = dev->netdev_ops; |
5164 | } |
5165 | } |
5166 | |
5167 | if (!br_dev && !dev) { |
5168 | NL_SET_ERR_MSG(extack, "No device specified" ); |
5169 | return -ENODEV; |
5170 | } |
5171 | |
5172 | if (!ops || !ops->ndo_fdb_get) { |
5173 | NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device" ); |
5174 | return -EOPNOTSUPP; |
5175 | } |
5176 | |
5177 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); |
5178 | if (!skb) |
5179 | return -ENOBUFS; |
5180 | |
5181 | if (br_dev) |
5182 | dev = br_dev; |
5183 | err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, |
5184 | NETLINK_CB(in_skb).portid, |
5185 | nlh->nlmsg_seq, extack); |
5186 | if (err) |
5187 | goto out; |
5188 | |
5189 | return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); |
5190 | out: |
5191 | kfree_skb(skb); |
5192 | return err; |
5193 | } |
5194 | |
5195 | static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, |
5196 | unsigned int attrnum, unsigned int flag) |
5197 | { |
5198 | if (mask & flag) |
5199 | return nla_put_u8(skb, attrtype: attrnum, value: !!(flags & flag)); |
5200 | return 0; |
5201 | } |
5202 | |
5203 | int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
5204 | struct net_device *dev, u16 mode, |
5205 | u32 flags, u32 mask, int nlflags, |
5206 | u32 filter_mask, |
5207 | int (*vlan_fill)(struct sk_buff *skb, |
5208 | struct net_device *dev, |
5209 | u32 filter_mask)) |
5210 | { |
5211 | struct nlmsghdr *nlh; |
5212 | struct ifinfomsg *ifm; |
5213 | struct nlattr *br_afspec; |
5214 | struct nlattr *protinfo; |
5215 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; |
5216 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
5217 | int err = 0; |
5218 | |
5219 | nlh = nlmsg_put(skb, portid: pid, seq, RTM_NEWLINK, payload: sizeof(*ifm), flags: nlflags); |
5220 | if (nlh == NULL) |
5221 | return -EMSGSIZE; |
5222 | |
5223 | ifm = nlmsg_data(nlh); |
5224 | ifm->ifi_family = AF_BRIDGE; |
5225 | ifm->__ifi_pad = 0; |
5226 | ifm->ifi_type = dev->type; |
5227 | ifm->ifi_index = dev->ifindex; |
5228 | ifm->ifi_flags = dev_get_flags(dev); |
5229 | ifm->ifi_change = 0; |
5230 | |
5231 | |
5232 | if (nla_put_string(skb, attrtype: IFLA_IFNAME, str: dev->name) || |
5233 | nla_put_u32(skb, attrtype: IFLA_MTU, value: dev->mtu) || |
5234 | nla_put_u8(skb, attrtype: IFLA_OPERSTATE, value: operstate) || |
5235 | (br_dev && |
5236 | nla_put_u32(skb, IFLA_MASTER, value: br_dev->ifindex)) || |
5237 | (dev->addr_len && |
5238 | nla_put(skb, attrtype: IFLA_ADDRESS, attrlen: dev->addr_len, data: dev->dev_addr)) || |
5239 | (dev->ifindex != dev_get_iflink(dev) && |
5240 | nla_put_u32(skb, attrtype: IFLA_LINK, value: dev_get_iflink(dev)))) |
5241 | goto nla_put_failure; |
5242 | |
5243 | br_afspec = nla_nest_start_noflag(skb, attrtype: IFLA_AF_SPEC); |
5244 | if (!br_afspec) |
5245 | goto nla_put_failure; |
5246 | |
5247 | if (nla_put_u16(skb, attrtype: IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { |
5248 | nla_nest_cancel(skb, start: br_afspec); |
5249 | goto nla_put_failure; |
5250 | } |
5251 | |
5252 | if (mode != BRIDGE_MODE_UNDEF) { |
5253 | if (nla_put_u16(skb, attrtype: IFLA_BRIDGE_MODE, value: mode)) { |
5254 | nla_nest_cancel(skb, start: br_afspec); |
5255 | goto nla_put_failure; |
5256 | } |
5257 | } |
5258 | if (vlan_fill) { |
5259 | err = vlan_fill(skb, dev, filter_mask); |
5260 | if (err) { |
5261 | nla_nest_cancel(skb, start: br_afspec); |
5262 | goto nla_put_failure; |
5263 | } |
5264 | } |
5265 | nla_nest_end(skb, start: br_afspec); |
5266 | |
5267 | protinfo = nla_nest_start(skb, IFLA_PROTINFO); |
5268 | if (!protinfo) |
5269 | goto nla_put_failure; |
5270 | |
5271 | if (brport_nla_put_flag(skb, flags, mask, |
5272 | attrnum: IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || |
5273 | brport_nla_put_flag(skb, flags, mask, |
5274 | attrnum: IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || |
5275 | brport_nla_put_flag(skb, flags, mask, |
5276 | attrnum: IFLA_BRPORT_FAST_LEAVE, |
5277 | BR_MULTICAST_FAST_LEAVE) || |
5278 | brport_nla_put_flag(skb, flags, mask, |
5279 | attrnum: IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || |
5280 | brport_nla_put_flag(skb, flags, mask, |
5281 | attrnum: IFLA_BRPORT_LEARNING, BR_LEARNING) || |
5282 | brport_nla_put_flag(skb, flags, mask, |
5283 | attrnum: IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || |
5284 | brport_nla_put_flag(skb, flags, mask, |
5285 | attrnum: IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || |
5286 | brport_nla_put_flag(skb, flags, mask, |
5287 | attrnum: IFLA_BRPORT_PROXYARP, BR_PROXYARP) || |
5288 | brport_nla_put_flag(skb, flags, mask, |
5289 | attrnum: IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || |
5290 | brport_nla_put_flag(skb, flags, mask, |
5291 | attrnum: IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { |
5292 | nla_nest_cancel(skb, start: protinfo); |
5293 | goto nla_put_failure; |
5294 | } |
5295 | |
5296 | nla_nest_end(skb, start: protinfo); |
5297 | |
5298 | nlmsg_end(skb, nlh); |
5299 | return 0; |
5300 | nla_put_failure: |
5301 | nlmsg_cancel(skb, nlh); |
5302 | return err ? err : -EMSGSIZE; |
5303 | } |
5304 | EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); |
5305 | |
5306 | static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, |
5307 | bool strict_check, u32 *filter_mask, |
5308 | struct netlink_ext_ack *extack) |
5309 | { |
5310 | struct nlattr *tb[IFLA_MAX+1]; |
5311 | int err, i; |
5312 | |
5313 | if (strict_check) { |
5314 | struct ifinfomsg *ifm; |
5315 | |
5316 | ifm = nlmsg_payload(nlh, len: sizeof(*ifm)); |
5317 | if (!ifm) { |
5318 | NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump" ); |
5319 | return -EINVAL; |
5320 | } |
5321 | |
5322 | if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || |
5323 | ifm->ifi_change || ifm->ifi_index) { |
5324 | NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request" ); |
5325 | return -EINVAL; |
5326 | } |
5327 | |
5328 | err = nlmsg_parse_deprecated_strict(nlh, |
5329 | hdrlen: sizeof(struct ifinfomsg), |
5330 | tb, IFLA_MAX, policy: ifla_policy, |
5331 | extack); |
5332 | } else { |
5333 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(struct ifinfomsg), |
5334 | tb, IFLA_MAX, policy: ifla_policy, |
5335 | extack); |
5336 | } |
5337 | if (err < 0) |
5338 | return err; |
5339 | |
5340 | /* new attributes should only be added with strict checking */ |
5341 | for (i = 0; i <= IFLA_MAX; ++i) { |
5342 | if (!tb[i]) |
5343 | continue; |
5344 | |
5345 | switch (i) { |
5346 | case IFLA_EXT_MASK: |
5347 | *filter_mask = nla_get_u32(nla: tb[i]); |
5348 | break; |
5349 | default: |
5350 | if (strict_check) { |
5351 | NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request" ); |
5352 | return -EINVAL; |
5353 | } |
5354 | } |
5355 | } |
5356 | |
5357 | return 0; |
5358 | } |
5359 | |
5360 | static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) |
5361 | { |
5362 | const struct nlmsghdr *nlh = cb->nlh; |
5363 | struct net *net = sock_net(sk: skb->sk); |
5364 | struct net_device *dev; |
5365 | int idx = 0; |
5366 | u32 portid = NETLINK_CB(cb->skb).portid; |
5367 | u32 seq = nlh->nlmsg_seq; |
5368 | u32 filter_mask = 0; |
5369 | int err; |
5370 | |
5371 | err = valid_bridge_getlink_req(nlh, strict_check: cb->strict_check, filter_mask: &filter_mask, |
5372 | extack: cb->extack); |
5373 | if (err < 0 && cb->strict_check) |
5374 | return err; |
5375 | |
5376 | rcu_read_lock(); |
5377 | for_each_netdev_rcu(net, dev) { |
5378 | const struct net_device_ops *ops = dev->netdev_ops; |
5379 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
5380 | |
5381 | if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { |
5382 | if (idx >= cb->args[0]) { |
5383 | err = br_dev->netdev_ops->ndo_bridge_getlink( |
5384 | skb, portid, seq, dev, |
5385 | filter_mask, NLM_F_MULTI); |
5386 | if (err < 0 && err != -EOPNOTSUPP) { |
5387 | if (likely(skb->len)) |
5388 | break; |
5389 | |
5390 | goto out_err; |
5391 | } |
5392 | } |
5393 | idx++; |
5394 | } |
5395 | |
5396 | if (ops->ndo_bridge_getlink) { |
5397 | if (idx >= cb->args[0]) { |
5398 | err = ops->ndo_bridge_getlink(skb, portid, |
5399 | seq, dev, |
5400 | filter_mask, |
5401 | NLM_F_MULTI); |
5402 | if (err < 0 && err != -EOPNOTSUPP) { |
5403 | if (likely(skb->len)) |
5404 | break; |
5405 | |
5406 | goto out_err; |
5407 | } |
5408 | } |
5409 | idx++; |
5410 | } |
5411 | } |
5412 | err = skb->len; |
5413 | out_err: |
5414 | rcu_read_unlock(); |
5415 | cb->args[0] = idx; |
5416 | |
5417 | return err; |
5418 | } |
5419 | |
5420 | static inline size_t bridge_nlmsg_size(void) |
5421 | { |
5422 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
5423 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ |
5424 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
5425 | + nla_total_size(payload: sizeof(u32)) /* IFLA_MASTER */ |
5426 | + nla_total_size(payload: sizeof(u32)) /* IFLA_MTU */ |
5427 | + nla_total_size(payload: sizeof(u32)) /* IFLA_LINK */ |
5428 | + nla_total_size(payload: sizeof(u32)) /* IFLA_OPERSTATE */ |
5429 | + nla_total_size(payload: sizeof(u8)) /* IFLA_PROTINFO */ |
5430 | + nla_total_size(payload: sizeof(struct nlattr)) /* IFLA_AF_SPEC */ |
5431 | + nla_total_size(payload: sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ |
5432 | + nla_total_size(payload: sizeof(u16)); /* IFLA_BRIDGE_MODE */ |
5433 | } |
5434 | |
5435 | static int rtnl_bridge_notify(struct net_device *dev) |
5436 | { |
5437 | struct net *net = dev_net(dev); |
5438 | struct sk_buff *skb; |
5439 | int err = -EOPNOTSUPP; |
5440 | |
5441 | if (!dev->netdev_ops->ndo_bridge_getlink) |
5442 | return 0; |
5443 | |
5444 | skb = nlmsg_new(payload: bridge_nlmsg_size(), GFP_ATOMIC); |
5445 | if (!skb) { |
5446 | err = -ENOMEM; |
5447 | goto errout; |
5448 | } |
5449 | |
5450 | err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); |
5451 | if (err < 0) |
5452 | goto errout; |
5453 | |
5454 | /* Notification info is only filled for bridge ports, not the bridge |
5455 | * device itself. Therefore, a zero notification length is valid and |
5456 | * should not result in an error. |
5457 | */ |
5458 | if (!skb->len) |
5459 | goto errout; |
5460 | |
5461 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); |
5462 | return 0; |
5463 | errout: |
5464 | WARN_ON(err == -EMSGSIZE); |
5465 | kfree_skb(skb); |
5466 | if (err) |
5467 | rtnl_set_sk_err(net, RTNLGRP_LINK, err); |
5468 | return err; |
5469 | } |
5470 | |
5471 | static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, |
5472 | struct netlink_ext_ack *extack) |
5473 | { |
5474 | struct net *net = sock_net(sk: skb->sk); |
5475 | struct ifinfomsg *ifm; |
5476 | struct net_device *dev; |
5477 | struct nlattr *br_spec, *attr, *br_flags_attr = NULL; |
5478 | int rem, err = -EOPNOTSUPP; |
5479 | u16 flags = 0; |
5480 | |
5481 | if (nlmsg_len(nlh) < sizeof(*ifm)) |
5482 | return -EINVAL; |
5483 | |
5484 | ifm = nlmsg_data(nlh); |
5485 | if (ifm->ifi_family != AF_BRIDGE) |
5486 | return -EPFNOSUPPORT; |
5487 | |
5488 | dev = __dev_get_by_index(net, ifindex: ifm->ifi_index); |
5489 | if (!dev) { |
5490 | NL_SET_ERR_MSG(extack, "unknown ifindex" ); |
5491 | return -ENODEV; |
5492 | } |
5493 | |
5494 | br_spec = nlmsg_find_attr(nlh, hdrlen: sizeof(struct ifinfomsg), attrtype: IFLA_AF_SPEC); |
5495 | if (br_spec) { |
5496 | nla_for_each_nested(attr, br_spec, rem) { |
5497 | if (nla_type(nla: attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) { |
5498 | if (nla_len(nla: attr) < sizeof(flags)) |
5499 | return -EINVAL; |
5500 | |
5501 | br_flags_attr = attr; |
5502 | flags = nla_get_u16(nla: attr); |
5503 | } |
5504 | |
5505 | if (nla_type(nla: attr) == IFLA_BRIDGE_MODE) { |
5506 | if (nla_len(nla: attr) < sizeof(u16)) |
5507 | return -EINVAL; |
5508 | } |
5509 | } |
5510 | } |
5511 | |
5512 | if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { |
5513 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
5514 | |
5515 | if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { |
5516 | err = -EOPNOTSUPP; |
5517 | goto out; |
5518 | } |
5519 | |
5520 | err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, |
5521 | extack); |
5522 | if (err) |
5523 | goto out; |
5524 | |
5525 | flags &= ~BRIDGE_FLAGS_MASTER; |
5526 | } |
5527 | |
5528 | if ((flags & BRIDGE_FLAGS_SELF)) { |
5529 | if (!dev->netdev_ops->ndo_bridge_setlink) |
5530 | err = -EOPNOTSUPP; |
5531 | else |
5532 | err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, |
5533 | flags, |
5534 | extack); |
5535 | if (!err) { |
5536 | flags &= ~BRIDGE_FLAGS_SELF; |
5537 | |
5538 | /* Generate event to notify upper layer of bridge |
5539 | * change |
5540 | */ |
5541 | err = rtnl_bridge_notify(dev); |
5542 | } |
5543 | } |
5544 | |
5545 | if (br_flags_attr) |
5546 | memcpy(nla_data(br_flags_attr), &flags, sizeof(flags)); |
5547 | out: |
5548 | return err; |
5549 | } |
5550 | |
5551 | static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, |
5552 | struct netlink_ext_ack *extack) |
5553 | { |
5554 | struct net *net = sock_net(sk: skb->sk); |
5555 | struct ifinfomsg *ifm; |
5556 | struct net_device *dev; |
5557 | struct nlattr *br_spec, *attr = NULL; |
5558 | int rem, err = -EOPNOTSUPP; |
5559 | u16 flags = 0; |
5560 | bool have_flags = false; |
5561 | |
5562 | if (nlmsg_len(nlh) < sizeof(*ifm)) |
5563 | return -EINVAL; |
5564 | |
5565 | ifm = nlmsg_data(nlh); |
5566 | if (ifm->ifi_family != AF_BRIDGE) |
5567 | return -EPFNOSUPPORT; |
5568 | |
5569 | dev = __dev_get_by_index(net, ifindex: ifm->ifi_index); |
5570 | if (!dev) { |
5571 | NL_SET_ERR_MSG(extack, "unknown ifindex" ); |
5572 | return -ENODEV; |
5573 | } |
5574 | |
5575 | br_spec = nlmsg_find_attr(nlh, hdrlen: sizeof(struct ifinfomsg), attrtype: IFLA_AF_SPEC); |
5576 | if (br_spec) { |
5577 | nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec, |
5578 | rem) { |
5579 | if (nla_len(nla: attr) < sizeof(flags)) |
5580 | return -EINVAL; |
5581 | |
5582 | have_flags = true; |
5583 | flags = nla_get_u16(nla: attr); |
5584 | break; |
5585 | } |
5586 | } |
5587 | |
5588 | if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { |
5589 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
5590 | |
5591 | if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { |
5592 | err = -EOPNOTSUPP; |
5593 | goto out; |
5594 | } |
5595 | |
5596 | err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); |
5597 | if (err) |
5598 | goto out; |
5599 | |
5600 | flags &= ~BRIDGE_FLAGS_MASTER; |
5601 | } |
5602 | |
5603 | if ((flags & BRIDGE_FLAGS_SELF)) { |
5604 | if (!dev->netdev_ops->ndo_bridge_dellink) |
5605 | err = -EOPNOTSUPP; |
5606 | else |
5607 | err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, |
5608 | flags); |
5609 | |
5610 | if (!err) { |
5611 | flags &= ~BRIDGE_FLAGS_SELF; |
5612 | |
5613 | /* Generate event to notify upper layer of bridge |
5614 | * change |
5615 | */ |
5616 | err = rtnl_bridge_notify(dev); |
5617 | } |
5618 | } |
5619 | |
5620 | if (have_flags) |
5621 | memcpy(nla_data(attr), &flags, sizeof(flags)); |
5622 | out: |
5623 | return err; |
5624 | } |
5625 | |
5626 | static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) |
5627 | { |
5628 | return (mask & IFLA_STATS_FILTER_BIT(attrid)) && |
5629 | (!idxattr || idxattr == attrid); |
5630 | } |
5631 | |
5632 | static bool |
5633 | rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) |
5634 | { |
5635 | return dev->netdev_ops && |
5636 | dev->netdev_ops->ndo_has_offload_stats && |
5637 | dev->netdev_ops->ndo_get_offload_stats && |
5638 | dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); |
5639 | } |
5640 | |
5641 | static unsigned int |
5642 | rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) |
5643 | { |
5644 | return rtnl_offload_xstats_have_ndo(dev, attr_id) ? |
5645 | sizeof(struct rtnl_link_stats64) : 0; |
5646 | } |
5647 | |
5648 | static int |
5649 | rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, |
5650 | struct sk_buff *skb) |
5651 | { |
5652 | unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); |
5653 | struct nlattr *attr = NULL; |
5654 | void *attr_data; |
5655 | int err; |
5656 | |
5657 | if (!size) |
5658 | return -ENODATA; |
5659 | |
5660 | attr = nla_reserve_64bit(skb, attrtype: attr_id, attrlen: size, |
5661 | padattr: IFLA_OFFLOAD_XSTATS_UNSPEC); |
5662 | if (!attr) |
5663 | return -EMSGSIZE; |
5664 | |
5665 | attr_data = nla_data(nla: attr); |
5666 | memset(attr_data, 0, size); |
5667 | |
5668 | err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); |
5669 | if (err) |
5670 | return err; |
5671 | |
5672 | return 0; |
5673 | } |
5674 | |
5675 | static unsigned int |
5676 | rtnl_offload_xstats_get_size_stats(const struct net_device *dev, |
5677 | enum netdev_offload_xstats_type type) |
5678 | { |
5679 | bool enabled = netdev_offload_xstats_enabled(dev, type); |
5680 | |
5681 | return enabled ? sizeof(struct rtnl_hw_stats64) : 0; |
5682 | } |
5683 | |
5684 | struct rtnl_offload_xstats_request_used { |
5685 | bool request; |
5686 | bool used; |
5687 | }; |
5688 | |
5689 | static int |
5690 | rtnl_offload_xstats_get_stats(struct net_device *dev, |
5691 | enum netdev_offload_xstats_type type, |
5692 | struct rtnl_offload_xstats_request_used *ru, |
5693 | struct rtnl_hw_stats64 *stats, |
5694 | struct netlink_ext_ack *extack) |
5695 | { |
5696 | bool request; |
5697 | bool used; |
5698 | int err; |
5699 | |
5700 | request = netdev_offload_xstats_enabled(dev, type); |
5701 | if (!request) { |
5702 | used = false; |
5703 | goto out; |
5704 | } |
5705 | |
5706 | err = netdev_offload_xstats_get(dev, type, stats, used: &used, extack); |
5707 | if (err) |
5708 | return err; |
5709 | |
5710 | out: |
5711 | if (ru) { |
5712 | ru->request = request; |
5713 | ru->used = used; |
5714 | } |
5715 | return 0; |
5716 | } |
5717 | |
5718 | static int |
5719 | rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, |
5720 | struct rtnl_offload_xstats_request_used *ru) |
5721 | { |
5722 | struct nlattr *nest; |
5723 | |
5724 | nest = nla_nest_start(skb, attrtype: attr_id); |
5725 | if (!nest) |
5726 | return -EMSGSIZE; |
5727 | |
5728 | if (nla_put_u8(skb, attrtype: IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, value: ru->request)) |
5729 | goto nla_put_failure; |
5730 | |
5731 | if (nla_put_u8(skb, attrtype: IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, value: ru->used)) |
5732 | goto nla_put_failure; |
5733 | |
5734 | nla_nest_end(skb, start: nest); |
5735 | return 0; |
5736 | |
5737 | nla_put_failure: |
5738 | nla_nest_cancel(skb, start: nest); |
5739 | return -EMSGSIZE; |
5740 | } |
5741 | |
5742 | static int |
5743 | rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, |
5744 | struct netlink_ext_ack *extack) |
5745 | { |
5746 | enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; |
5747 | struct rtnl_offload_xstats_request_used ru_l3; |
5748 | struct nlattr *nest; |
5749 | int err; |
5750 | |
5751 | err = rtnl_offload_xstats_get_stats(dev, type: t_l3, ru: &ru_l3, NULL, extack); |
5752 | if (err) |
5753 | return err; |
5754 | |
5755 | nest = nla_nest_start(skb, attrtype: IFLA_OFFLOAD_XSTATS_HW_S_INFO); |
5756 | if (!nest) |
5757 | return -EMSGSIZE; |
5758 | |
5759 | if (rtnl_offload_xstats_fill_hw_s_info_one(skb, |
5760 | attr_id: IFLA_OFFLOAD_XSTATS_L3_STATS, |
5761 | ru: &ru_l3)) |
5762 | goto nla_put_failure; |
5763 | |
5764 | nla_nest_end(skb, start: nest); |
5765 | return 0; |
5766 | |
5767 | nla_put_failure: |
5768 | nla_nest_cancel(skb, start: nest); |
5769 | return -EMSGSIZE; |
5770 | } |
5771 | |
5772 | static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, |
5773 | int *prividx, u32 off_filter_mask, |
5774 | struct netlink_ext_ack *extack) |
5775 | { |
5776 | enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; |
5777 | int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; |
5778 | int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; |
5779 | int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; |
5780 | bool have_data = false; |
5781 | int err; |
5782 | |
5783 | if (*prividx <= attr_id_cpu_hit && |
5784 | (off_filter_mask & |
5785 | IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { |
5786 | err = rtnl_offload_xstats_fill_ndo(dev, attr_id: attr_id_cpu_hit, skb); |
5787 | if (!err) { |
5788 | have_data = true; |
5789 | } else if (err != -ENODATA) { |
5790 | *prividx = attr_id_cpu_hit; |
5791 | return err; |
5792 | } |
5793 | } |
5794 | |
5795 | if (*prividx <= attr_id_hw_s_info && |
5796 | (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { |
5797 | *prividx = attr_id_hw_s_info; |
5798 | |
5799 | err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); |
5800 | if (err) |
5801 | return err; |
5802 | |
5803 | have_data = true; |
5804 | *prividx = 0; |
5805 | } |
5806 | |
5807 | if (*prividx <= attr_id_l3_stats && |
5808 | (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { |
5809 | unsigned int size_l3; |
5810 | struct nlattr *attr; |
5811 | |
5812 | *prividx = attr_id_l3_stats; |
5813 | |
5814 | size_l3 = rtnl_offload_xstats_get_size_stats(dev, type: t_l3); |
5815 | if (!size_l3) |
5816 | goto skip_l3_stats; |
5817 | attr = nla_reserve_64bit(skb, attrtype: attr_id_l3_stats, attrlen: size_l3, |
5818 | padattr: IFLA_OFFLOAD_XSTATS_UNSPEC); |
5819 | if (!attr) |
5820 | return -EMSGSIZE; |
5821 | |
5822 | err = rtnl_offload_xstats_get_stats(dev, type: t_l3, NULL, |
5823 | stats: nla_data(nla: attr), extack); |
5824 | if (err) |
5825 | return err; |
5826 | |
5827 | have_data = true; |
5828 | skip_l3_stats: |
5829 | *prividx = 0; |
5830 | } |
5831 | |
5832 | if (!have_data) |
5833 | return -ENODATA; |
5834 | |
5835 | *prividx = 0; |
5836 | return 0; |
5837 | } |
5838 | |
5839 | static unsigned int |
5840 | rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, |
5841 | enum netdev_offload_xstats_type type) |
5842 | { |
5843 | return nla_total_size(payload: 0) + |
5844 | /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ |
5845 | nla_total_size(payload: sizeof(u8)) + |
5846 | /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ |
5847 | nla_total_size(payload: sizeof(u8)) + |
5848 | 0; |
5849 | } |
5850 | |
5851 | static unsigned int |
5852 | rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) |
5853 | { |
5854 | enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; |
5855 | |
5856 | return nla_total_size(payload: 0) + |
5857 | /* IFLA_OFFLOAD_XSTATS_L3_STATS */ |
5858 | rtnl_offload_xstats_get_size_hw_s_info_one(dev, type: t_l3) + |
5859 | 0; |
5860 | } |
5861 | |
5862 | static int rtnl_offload_xstats_get_size(const struct net_device *dev, |
5863 | u32 off_filter_mask) |
5864 | { |
5865 | enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; |
5866 | int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; |
5867 | int nla_size = 0; |
5868 | int size; |
5869 | |
5870 | if (off_filter_mask & |
5871 | IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { |
5872 | size = rtnl_offload_xstats_get_size_ndo(dev, attr_id: attr_id_cpu_hit); |
5873 | nla_size += nla_total_size_64bit(payload: size); |
5874 | } |
5875 | |
5876 | if (off_filter_mask & |
5877 | IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) |
5878 | nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); |
5879 | |
5880 | if (off_filter_mask & |
5881 | IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { |
5882 | size = rtnl_offload_xstats_get_size_stats(dev, type: t_l3); |
5883 | nla_size += nla_total_size_64bit(payload: size); |
5884 | } |
5885 | |
5886 | if (nla_size != 0) |
5887 | nla_size += nla_total_size(payload: 0); |
5888 | |
5889 | return nla_size; |
5890 | } |
5891 | |
5892 | struct rtnl_stats_dump_filters { |
5893 | /* mask[0] filters outer attributes. Then individual nests have their |
5894 | * filtering mask at the index of the nested attribute. |
5895 | */ |
5896 | u32 mask[IFLA_STATS_MAX + 1]; |
5897 | }; |
5898 | |
5899 | static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, |
5900 | int type, u32 pid, u32 seq, u32 change, |
5901 | unsigned int flags, |
5902 | const struct rtnl_stats_dump_filters *filters, |
5903 | int *idxattr, int *prividx, |
5904 | struct netlink_ext_ack *extack) |
5905 | { |
5906 | unsigned int filter_mask = filters->mask[0]; |
5907 | struct if_stats_msg *ifsm; |
5908 | struct nlmsghdr *nlh; |
5909 | struct nlattr *attr; |
5910 | int s_prividx = *prividx; |
5911 | int err; |
5912 | |
5913 | ASSERT_RTNL(); |
5914 | |
5915 | nlh = nlmsg_put(skb, portid: pid, seq, type, payload: sizeof(*ifsm), flags); |
5916 | if (!nlh) |
5917 | return -EMSGSIZE; |
5918 | |
5919 | ifsm = nlmsg_data(nlh); |
5920 | ifsm->family = PF_UNSPEC; |
5921 | ifsm->pad1 = 0; |
5922 | ifsm->pad2 = 0; |
5923 | ifsm->ifindex = dev->ifindex; |
5924 | ifsm->filter_mask = filter_mask; |
5925 | |
5926 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_64, idxattr: *idxattr)) { |
5927 | struct rtnl_link_stats64 *sp; |
5928 | |
5929 | attr = nla_reserve_64bit(skb, attrtype: IFLA_STATS_LINK_64, |
5930 | attrlen: sizeof(struct rtnl_link_stats64), |
5931 | padattr: IFLA_STATS_UNSPEC); |
5932 | if (!attr) { |
5933 | err = -EMSGSIZE; |
5934 | goto nla_put_failure; |
5935 | } |
5936 | |
5937 | sp = nla_data(nla: attr); |
5938 | dev_get_stats(dev, storage: sp); |
5939 | } |
5940 | |
5941 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_XSTATS, idxattr: *idxattr)) { |
5942 | const struct rtnl_link_ops *ops = dev->rtnl_link_ops; |
5943 | |
5944 | if (ops && ops->fill_linkxstats) { |
5945 | *idxattr = IFLA_STATS_LINK_XSTATS; |
5946 | attr = nla_nest_start_noflag(skb, |
5947 | attrtype: IFLA_STATS_LINK_XSTATS); |
5948 | if (!attr) { |
5949 | err = -EMSGSIZE; |
5950 | goto nla_put_failure; |
5951 | } |
5952 | |
5953 | err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); |
5954 | nla_nest_end(skb, start: attr); |
5955 | if (err) |
5956 | goto nla_put_failure; |
5957 | *idxattr = 0; |
5958 | } |
5959 | } |
5960 | |
5961 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_XSTATS_SLAVE, |
5962 | idxattr: *idxattr)) { |
5963 | const struct rtnl_link_ops *ops = NULL; |
5964 | const struct net_device *master; |
5965 | |
5966 | master = netdev_master_upper_dev_get(dev); |
5967 | if (master) |
5968 | ops = master->rtnl_link_ops; |
5969 | if (ops && ops->fill_linkxstats) { |
5970 | *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; |
5971 | attr = nla_nest_start_noflag(skb, |
5972 | attrtype: IFLA_STATS_LINK_XSTATS_SLAVE); |
5973 | if (!attr) { |
5974 | err = -EMSGSIZE; |
5975 | goto nla_put_failure; |
5976 | } |
5977 | |
5978 | err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); |
5979 | nla_nest_end(skb, start: attr); |
5980 | if (err) |
5981 | goto nla_put_failure; |
5982 | *idxattr = 0; |
5983 | } |
5984 | } |
5985 | |
5986 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_OFFLOAD_XSTATS, |
5987 | idxattr: *idxattr)) { |
5988 | u32 off_filter_mask; |
5989 | |
5990 | off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; |
5991 | *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; |
5992 | attr = nla_nest_start_noflag(skb, |
5993 | attrtype: IFLA_STATS_LINK_OFFLOAD_XSTATS); |
5994 | if (!attr) { |
5995 | err = -EMSGSIZE; |
5996 | goto nla_put_failure; |
5997 | } |
5998 | |
5999 | err = rtnl_offload_xstats_fill(skb, dev, prividx, |
6000 | off_filter_mask, extack); |
6001 | if (err == -ENODATA) |
6002 | nla_nest_cancel(skb, start: attr); |
6003 | else |
6004 | nla_nest_end(skb, start: attr); |
6005 | |
6006 | if (err && err != -ENODATA) |
6007 | goto nla_put_failure; |
6008 | *idxattr = 0; |
6009 | } |
6010 | |
6011 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_AF_SPEC, idxattr: *idxattr)) { |
6012 | struct rtnl_af_ops *af_ops; |
6013 | |
6014 | *idxattr = IFLA_STATS_AF_SPEC; |
6015 | attr = nla_nest_start_noflag(skb, attrtype: IFLA_STATS_AF_SPEC); |
6016 | if (!attr) { |
6017 | err = -EMSGSIZE; |
6018 | goto nla_put_failure; |
6019 | } |
6020 | |
6021 | rcu_read_lock(); |
6022 | list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { |
6023 | if (af_ops->fill_stats_af) { |
6024 | struct nlattr *af; |
6025 | |
6026 | af = nla_nest_start_noflag(skb, |
6027 | attrtype: af_ops->family); |
6028 | if (!af) { |
6029 | rcu_read_unlock(); |
6030 | err = -EMSGSIZE; |
6031 | goto nla_put_failure; |
6032 | } |
6033 | err = af_ops->fill_stats_af(skb, dev); |
6034 | |
6035 | if (err == -ENODATA) { |
6036 | nla_nest_cancel(skb, start: af); |
6037 | } else if (err < 0) { |
6038 | rcu_read_unlock(); |
6039 | goto nla_put_failure; |
6040 | } |
6041 | |
6042 | nla_nest_end(skb, start: af); |
6043 | } |
6044 | } |
6045 | rcu_read_unlock(); |
6046 | |
6047 | nla_nest_end(skb, start: attr); |
6048 | |
6049 | *idxattr = 0; |
6050 | } |
6051 | |
6052 | nlmsg_end(skb, nlh); |
6053 | |
6054 | return 0; |
6055 | |
6056 | nla_put_failure: |
6057 | /* not a multi message or no progress mean a real error */ |
6058 | if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) |
6059 | nlmsg_cancel(skb, nlh); |
6060 | else |
6061 | nlmsg_end(skb, nlh); |
6062 | |
6063 | return err; |
6064 | } |
6065 | |
6066 | static size_t if_nlmsg_stats_size(const struct net_device *dev, |
6067 | const struct rtnl_stats_dump_filters *filters) |
6068 | { |
6069 | size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); |
6070 | unsigned int filter_mask = filters->mask[0]; |
6071 | |
6072 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_64, idxattr: 0)) |
6073 | size += nla_total_size_64bit(payload: sizeof(struct rtnl_link_stats64)); |
6074 | |
6075 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_XSTATS, idxattr: 0)) { |
6076 | const struct rtnl_link_ops *ops = dev->rtnl_link_ops; |
6077 | int attr = IFLA_STATS_LINK_XSTATS; |
6078 | |
6079 | if (ops && ops->get_linkxstats_size) { |
6080 | size += nla_total_size(payload: ops->get_linkxstats_size(dev, |
6081 | attr)); |
6082 | /* for IFLA_STATS_LINK_XSTATS */ |
6083 | size += nla_total_size(payload: 0); |
6084 | } |
6085 | } |
6086 | |
6087 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_XSTATS_SLAVE, idxattr: 0)) { |
6088 | struct net_device *_dev = (struct net_device *)dev; |
6089 | const struct rtnl_link_ops *ops = NULL; |
6090 | const struct net_device *master; |
6091 | |
6092 | /* netdev_master_upper_dev_get can't take const */ |
6093 | master = netdev_master_upper_dev_get(dev: _dev); |
6094 | if (master) |
6095 | ops = master->rtnl_link_ops; |
6096 | if (ops && ops->get_linkxstats_size) { |
6097 | int attr = IFLA_STATS_LINK_XSTATS_SLAVE; |
6098 | |
6099 | size += nla_total_size(payload: ops->get_linkxstats_size(dev, |
6100 | attr)); |
6101 | /* for IFLA_STATS_LINK_XSTATS_SLAVE */ |
6102 | size += nla_total_size(payload: 0); |
6103 | } |
6104 | } |
6105 | |
6106 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_LINK_OFFLOAD_XSTATS, idxattr: 0)) { |
6107 | u32 off_filter_mask; |
6108 | |
6109 | off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; |
6110 | size += rtnl_offload_xstats_get_size(dev, off_filter_mask); |
6111 | } |
6112 | |
6113 | if (stats_attr_valid(mask: filter_mask, attrid: IFLA_STATS_AF_SPEC, idxattr: 0)) { |
6114 | struct rtnl_af_ops *af_ops; |
6115 | |
6116 | /* for IFLA_STATS_AF_SPEC */ |
6117 | size += nla_total_size(payload: 0); |
6118 | |
6119 | rcu_read_lock(); |
6120 | list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { |
6121 | if (af_ops->get_stats_af_size) { |
6122 | size += nla_total_size( |
6123 | payload: af_ops->get_stats_af_size(dev)); |
6124 | |
6125 | /* for AF_* */ |
6126 | size += nla_total_size(payload: 0); |
6127 | } |
6128 | } |
6129 | rcu_read_unlock(); |
6130 | } |
6131 | |
6132 | return size; |
6133 | } |
6134 | |
6135 | #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) |
6136 | |
6137 | static const struct nla_policy |
6138 | rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { |
6139 | [IFLA_STATS_LINK_OFFLOAD_XSTATS] = |
6140 | NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), |
6141 | }; |
6142 | |
6143 | static const struct nla_policy |
6144 | rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { |
6145 | [IFLA_STATS_GET_FILTERS] = |
6146 | NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), |
6147 | }; |
6148 | |
6149 | static const struct nla_policy |
6150 | ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { |
6151 | [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), |
6152 | }; |
6153 | |
6154 | static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, |
6155 | struct rtnl_stats_dump_filters *filters, |
6156 | struct netlink_ext_ack *extack) |
6157 | { |
6158 | struct nlattr *tb[IFLA_STATS_MAX + 1]; |
6159 | int err; |
6160 | int at; |
6161 | |
6162 | err = nla_parse_nested(tb, IFLA_STATS_MAX, nla: ifla_filters, |
6163 | policy: rtnl_stats_get_policy_filters, extack); |
6164 | if (err < 0) |
6165 | return err; |
6166 | |
6167 | for (at = 1; at <= IFLA_STATS_MAX; at++) { |
6168 | if (tb[at]) { |
6169 | if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { |
6170 | NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask" ); |
6171 | return -EINVAL; |
6172 | } |
6173 | filters->mask[at] = nla_get_u32(nla: tb[at]); |
6174 | } |
6175 | } |
6176 | |
6177 | return 0; |
6178 | } |
6179 | |
6180 | static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, |
6181 | u32 filter_mask, |
6182 | struct rtnl_stats_dump_filters *filters, |
6183 | struct netlink_ext_ack *extack) |
6184 | { |
6185 | struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; |
6186 | int err; |
6187 | int i; |
6188 | |
6189 | filters->mask[0] = filter_mask; |
6190 | for (i = 1; i < ARRAY_SIZE(filters->mask); i++) |
6191 | filters->mask[i] = -1U; |
6192 | |
6193 | err = nlmsg_parse(nlh, hdrlen: sizeof(struct if_stats_msg), tb, |
6194 | IFLA_STATS_GETSET_MAX, policy: rtnl_stats_get_policy, extack); |
6195 | if (err < 0) |
6196 | return err; |
6197 | |
6198 | if (tb[IFLA_STATS_GET_FILTERS]) { |
6199 | err = rtnl_stats_get_parse_filters(ifla_filters: tb[IFLA_STATS_GET_FILTERS], |
6200 | filters, extack); |
6201 | if (err) |
6202 | return err; |
6203 | } |
6204 | |
6205 | return 0; |
6206 | } |
6207 | |
6208 | static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, |
6209 | bool is_dump, struct netlink_ext_ack *extack) |
6210 | { |
6211 | struct if_stats_msg *ifsm; |
6212 | |
6213 | ifsm = nlmsg_payload(nlh, len: sizeof(*ifsm)); |
6214 | if (!ifsm) { |
6215 | NL_SET_ERR_MSG(extack, "Invalid header for stats dump" ); |
6216 | return -EINVAL; |
6217 | } |
6218 | |
6219 | if (!strict_check) |
6220 | return 0; |
6221 | |
6222 | /* only requests using strict checks can pass data to influence |
6223 | * the dump. The legacy exception is filter_mask. |
6224 | */ |
6225 | if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { |
6226 | NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request" ); |
6227 | return -EINVAL; |
6228 | } |
6229 | if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { |
6230 | NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask" ); |
6231 | return -EINVAL; |
6232 | } |
6233 | |
6234 | return 0; |
6235 | } |
6236 | |
6237 | static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, |
6238 | struct netlink_ext_ack *extack) |
6239 | { |
6240 | struct rtnl_stats_dump_filters filters; |
6241 | struct net *net = sock_net(sk: skb->sk); |
6242 | struct net_device *dev = NULL; |
6243 | int idxattr = 0, prividx = 0; |
6244 | struct if_stats_msg *ifsm; |
6245 | struct sk_buff *nskb; |
6246 | int err; |
6247 | |
6248 | err = rtnl_valid_stats_req(nlh, strict_check: netlink_strict_get_check(skb), |
6249 | is_dump: false, extack); |
6250 | if (err) |
6251 | return err; |
6252 | |
6253 | ifsm = nlmsg_data(nlh); |
6254 | if (ifsm->ifindex > 0) |
6255 | dev = __dev_get_by_index(net, ifindex: ifsm->ifindex); |
6256 | else |
6257 | return -EINVAL; |
6258 | |
6259 | if (!dev) |
6260 | return -ENODEV; |
6261 | |
6262 | if (!ifsm->filter_mask) { |
6263 | NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get" ); |
6264 | return -EINVAL; |
6265 | } |
6266 | |
6267 | err = rtnl_stats_get_parse(nlh, filter_mask: ifsm->filter_mask, filters: &filters, extack); |
6268 | if (err) |
6269 | return err; |
6270 | |
6271 | nskb = nlmsg_new(payload: if_nlmsg_stats_size(dev, filters: &filters), GFP_KERNEL); |
6272 | if (!nskb) |
6273 | return -ENOBUFS; |
6274 | |
6275 | err = rtnl_fill_statsinfo(skb: nskb, dev, RTM_NEWSTATS, |
6276 | NETLINK_CB(skb).portid, seq: nlh->nlmsg_seq, change: 0, |
6277 | flags: 0, filters: &filters, idxattr: &idxattr, prividx: &prividx, extack); |
6278 | if (err < 0) { |
6279 | /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ |
6280 | WARN_ON(err == -EMSGSIZE); |
6281 | kfree_skb(skb: nskb); |
6282 | } else { |
6283 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); |
6284 | } |
6285 | |
6286 | return err; |
6287 | } |
6288 | |
6289 | static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) |
6290 | { |
6291 | struct netlink_ext_ack *extack = cb->extack; |
6292 | struct rtnl_stats_dump_filters filters; |
6293 | struct net *net = sock_net(sk: skb->sk); |
6294 | unsigned int flags = NLM_F_MULTI; |
6295 | struct if_stats_msg *ifsm; |
6296 | struct { |
6297 | unsigned long ifindex; |
6298 | int idxattr; |
6299 | int prividx; |
6300 | } *ctx = (void *)cb->ctx; |
6301 | struct net_device *dev; |
6302 | int err; |
6303 | |
6304 | cb->seq = net->dev_base_seq; |
6305 | |
6306 | err = rtnl_valid_stats_req(nlh: cb->nlh, strict_check: cb->strict_check, is_dump: true, extack); |
6307 | if (err) |
6308 | return err; |
6309 | |
6310 | ifsm = nlmsg_data(nlh: cb->nlh); |
6311 | if (!ifsm->filter_mask) { |
6312 | NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump" ); |
6313 | return -EINVAL; |
6314 | } |
6315 | |
6316 | err = rtnl_stats_get_parse(nlh: cb->nlh, filter_mask: ifsm->filter_mask, filters: &filters, |
6317 | extack); |
6318 | if (err) |
6319 | return err; |
6320 | |
6321 | for_each_netdev_dump(net, dev, ctx->ifindex) { |
6322 | err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, |
6323 | NETLINK_CB(cb->skb).portid, |
6324 | seq: cb->nlh->nlmsg_seq, change: 0, |
6325 | flags, filters: &filters, |
6326 | idxattr: &ctx->idxattr, prividx: &ctx->prividx, |
6327 | extack); |
6328 | /* If we ran out of room on the first message, |
6329 | * we're in trouble. |
6330 | */ |
6331 | WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); |
6332 | |
6333 | if (err < 0) |
6334 | break; |
6335 | ctx->prividx = 0; |
6336 | ctx->idxattr = 0; |
6337 | nl_dump_check_consistent(cb, nlh: nlmsg_hdr(skb)); |
6338 | } |
6339 | |
6340 | return err; |
6341 | } |
6342 | |
6343 | void rtnl_offload_xstats_notify(struct net_device *dev) |
6344 | { |
6345 | struct rtnl_stats_dump_filters response_filters = {}; |
6346 | struct net *net = dev_net(dev); |
6347 | int idxattr = 0, prividx = 0; |
6348 | struct sk_buff *skb; |
6349 | int err = -ENOBUFS; |
6350 | |
6351 | ASSERT_RTNL(); |
6352 | |
6353 | response_filters.mask[0] |= |
6354 | IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); |
6355 | response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= |
6356 | IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); |
6357 | |
6358 | skb = nlmsg_new(payload: if_nlmsg_stats_size(dev, filters: &response_filters), |
6359 | GFP_KERNEL); |
6360 | if (!skb) |
6361 | goto errout; |
6362 | |
6363 | err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, pid: 0, seq: 0, change: 0, flags: 0, |
6364 | filters: &response_filters, idxattr: &idxattr, prividx: &prividx, NULL); |
6365 | if (err < 0) { |
6366 | kfree_skb(skb); |
6367 | goto errout; |
6368 | } |
6369 | |
6370 | rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); |
6371 | return; |
6372 | |
6373 | errout: |
6374 | rtnl_set_sk_err(net, RTNLGRP_STATS, err); |
6375 | } |
6376 | EXPORT_SYMBOL(rtnl_offload_xstats_notify); |
6377 | |
6378 | static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, |
6379 | struct netlink_ext_ack *extack) |
6380 | { |
6381 | enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; |
6382 | struct rtnl_stats_dump_filters response_filters = {}; |
6383 | struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; |
6384 | struct net *net = sock_net(sk: skb->sk); |
6385 | struct net_device *dev = NULL; |
6386 | struct if_stats_msg *ifsm; |
6387 | bool notify = false; |
6388 | int err; |
6389 | |
6390 | err = rtnl_valid_stats_req(nlh, strict_check: netlink_strict_get_check(skb), |
6391 | is_dump: false, extack); |
6392 | if (err) |
6393 | return err; |
6394 | |
6395 | ifsm = nlmsg_data(nlh); |
6396 | if (ifsm->family != AF_UNSPEC) { |
6397 | NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC" ); |
6398 | return -EINVAL; |
6399 | } |
6400 | |
6401 | if (ifsm->ifindex > 0) |
6402 | dev = __dev_get_by_index(net, ifindex: ifsm->ifindex); |
6403 | else |
6404 | return -EINVAL; |
6405 | |
6406 | if (!dev) |
6407 | return -ENODEV; |
6408 | |
6409 | if (ifsm->filter_mask) { |
6410 | NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set" ); |
6411 | return -EINVAL; |
6412 | } |
6413 | |
6414 | err = nlmsg_parse(nlh, hdrlen: sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, |
6415 | policy: ifla_stats_set_policy, extack); |
6416 | if (err < 0) |
6417 | return err; |
6418 | |
6419 | if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { |
6420 | u8 req = nla_get_u8(nla: tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); |
6421 | |
6422 | if (req) |
6423 | err = netdev_offload_xstats_enable(dev, type: t_l3, extack); |
6424 | else |
6425 | err = netdev_offload_xstats_disable(dev, type: t_l3); |
6426 | |
6427 | if (!err) |
6428 | notify = true; |
6429 | else if (err != -EALREADY) |
6430 | return err; |
6431 | |
6432 | response_filters.mask[0] |= |
6433 | IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); |
6434 | response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= |
6435 | IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); |
6436 | } |
6437 | |
6438 | if (notify) |
6439 | rtnl_offload_xstats_notify(dev); |
6440 | |
6441 | return 0; |
6442 | } |
6443 | |
6444 | static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, |
6445 | struct netlink_ext_ack *extack) |
6446 | { |
6447 | struct br_port_msg *bpm; |
6448 | |
6449 | bpm = nlmsg_payload(nlh, len: sizeof(*bpm)); |
6450 | if (!bpm) { |
6451 | NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request" ); |
6452 | return -EINVAL; |
6453 | } |
6454 | |
6455 | if (bpm->ifindex) { |
6456 | NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request" ); |
6457 | return -EINVAL; |
6458 | } |
6459 | if (nlmsg_attrlen(nlh, hdrlen: sizeof(*bpm))) { |
6460 | NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request" ); |
6461 | return -EINVAL; |
6462 | } |
6463 | |
6464 | return 0; |
6465 | } |
6466 | |
6467 | struct rtnl_mdb_dump_ctx { |
6468 | long idx; |
6469 | }; |
6470 | |
6471 | static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) |
6472 | { |
6473 | struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; |
6474 | struct net *net = sock_net(sk: skb->sk); |
6475 | struct net_device *dev; |
6476 | int idx, s_idx; |
6477 | int err; |
6478 | |
6479 | NL_ASSERT_CTX_FITS(struct rtnl_mdb_dump_ctx); |
6480 | |
6481 | if (cb->strict_check) { |
6482 | err = rtnl_mdb_valid_dump_req(nlh: cb->nlh, extack: cb->extack); |
6483 | if (err) |
6484 | return err; |
6485 | } |
6486 | |
6487 | s_idx = ctx->idx; |
6488 | idx = 0; |
6489 | |
6490 | for_each_netdev(net, dev) { |
6491 | if (idx < s_idx) |
6492 | goto skip; |
6493 | if (!dev->netdev_ops->ndo_mdb_dump) |
6494 | goto skip; |
6495 | |
6496 | err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); |
6497 | if (err == -EMSGSIZE) |
6498 | goto out; |
6499 | /* Moving on to next device, reset markers and sequence |
6500 | * counters since they are all maintained per-device. |
6501 | */ |
6502 | memset(cb->ctx, 0, sizeof(cb->ctx)); |
6503 | cb->prev_seq = 0; |
6504 | cb->seq = 0; |
6505 | skip: |
6506 | idx++; |
6507 | } |
6508 | |
6509 | out: |
6510 | ctx->idx = idx; |
6511 | return skb->len; |
6512 | } |
6513 | |
6514 | static int rtnl_validate_mdb_entry_get(const struct nlattr *attr, |
6515 | struct netlink_ext_ack *extack) |
6516 | { |
6517 | struct br_mdb_entry *entry = nla_data(nla: attr); |
6518 | |
6519 | if (nla_len(nla: attr) != sizeof(struct br_mdb_entry)) { |
6520 | NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length" ); |
6521 | return -EINVAL; |
6522 | } |
6523 | |
6524 | if (entry->ifindex) { |
6525 | NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified" ); |
6526 | return -EINVAL; |
6527 | } |
6528 | |
6529 | if (entry->state) { |
6530 | NL_SET_ERR_MSG(extack, "Entry state cannot be specified" ); |
6531 | return -EINVAL; |
6532 | } |
6533 | |
6534 | if (entry->flags) { |
6535 | NL_SET_ERR_MSG(extack, "Entry flags cannot be specified" ); |
6536 | return -EINVAL; |
6537 | } |
6538 | |
6539 | if (entry->vid >= VLAN_VID_MASK) { |
6540 | NL_SET_ERR_MSG(extack, "Invalid entry VLAN id" ); |
6541 | return -EINVAL; |
6542 | } |
6543 | |
6544 | if (entry->addr.proto != htons(ETH_P_IP) && |
6545 | entry->addr.proto != htons(ETH_P_IPV6) && |
6546 | entry->addr.proto != 0) { |
6547 | NL_SET_ERR_MSG(extack, "Unknown entry protocol" ); |
6548 | return -EINVAL; |
6549 | } |
6550 | |
6551 | return 0; |
6552 | } |
6553 | |
6554 | static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = { |
6555 | [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, |
6556 | rtnl_validate_mdb_entry_get, |
6557 | sizeof(struct br_mdb_entry)), |
6558 | [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED }, |
6559 | }; |
6560 | |
6561 | static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, |
6562 | struct netlink_ext_ack *extack) |
6563 | { |
6564 | struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1]; |
6565 | struct net *net = sock_net(sk: in_skb->sk); |
6566 | struct br_port_msg *bpm; |
6567 | struct net_device *dev; |
6568 | int err; |
6569 | |
6570 | err = nlmsg_parse(nlh, hdrlen: sizeof(struct br_port_msg), tb, |
6571 | MDBA_GET_ENTRY_MAX, policy: mdba_get_policy, extack); |
6572 | if (err) |
6573 | return err; |
6574 | |
6575 | bpm = nlmsg_data(nlh); |
6576 | if (!bpm->ifindex) { |
6577 | NL_SET_ERR_MSG(extack, "Invalid ifindex" ); |
6578 | return -EINVAL; |
6579 | } |
6580 | |
6581 | dev = __dev_get_by_index(net, ifindex: bpm->ifindex); |
6582 | if (!dev) { |
6583 | NL_SET_ERR_MSG(extack, "Device doesn't exist" ); |
6584 | return -ENODEV; |
6585 | } |
6586 | |
6587 | if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) { |
6588 | NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute" ); |
6589 | return -EINVAL; |
6590 | } |
6591 | |
6592 | if (!dev->netdev_ops->ndo_mdb_get) { |
6593 | NL_SET_ERR_MSG(extack, "Device does not support MDB operations" ); |
6594 | return -EOPNOTSUPP; |
6595 | } |
6596 | |
6597 | return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid, |
6598 | nlh->nlmsg_seq, extack); |
6599 | } |
6600 | |
6601 | static int rtnl_validate_mdb_entry(const struct nlattr *attr, |
6602 | struct netlink_ext_ack *extack) |
6603 | { |
6604 | struct br_mdb_entry *entry = nla_data(nla: attr); |
6605 | |
6606 | if (nla_len(nla: attr) != sizeof(struct br_mdb_entry)) { |
6607 | NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length" ); |
6608 | return -EINVAL; |
6609 | } |
6610 | |
6611 | if (entry->ifindex == 0) { |
6612 | NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed" ); |
6613 | return -EINVAL; |
6614 | } |
6615 | |
6616 | if (entry->addr.proto == htons(ETH_P_IP)) { |
6617 | if (!ipv4_is_multicast(addr: entry->addr.u.ip4) && |
6618 | !ipv4_is_zeronet(addr: entry->addr.u.ip4)) { |
6619 | NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0" ); |
6620 | return -EINVAL; |
6621 | } |
6622 | if (ipv4_is_local_multicast(addr: entry->addr.u.ip4)) { |
6623 | NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast" ); |
6624 | return -EINVAL; |
6625 | } |
6626 | #if IS_ENABLED(CONFIG_IPV6) |
6627 | } else if (entry->addr.proto == htons(ETH_P_IPV6)) { |
6628 | if (ipv6_addr_is_ll_all_nodes(addr: &entry->addr.u.ip6)) { |
6629 | NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes" ); |
6630 | return -EINVAL; |
6631 | } |
6632 | #endif |
6633 | } else if (entry->addr.proto == 0) { |
6634 | /* L2 mdb */ |
6635 | if (!is_multicast_ether_addr(addr: entry->addr.u.mac_addr)) { |
6636 | NL_SET_ERR_MSG(extack, "L2 entry group is not multicast" ); |
6637 | return -EINVAL; |
6638 | } |
6639 | } else { |
6640 | NL_SET_ERR_MSG(extack, "Unknown entry protocol" ); |
6641 | return -EINVAL; |
6642 | } |
6643 | |
6644 | if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) { |
6645 | NL_SET_ERR_MSG(extack, "Unknown entry state" ); |
6646 | return -EINVAL; |
6647 | } |
6648 | if (entry->vid >= VLAN_VID_MASK) { |
6649 | NL_SET_ERR_MSG(extack, "Invalid entry VLAN id" ); |
6650 | return -EINVAL; |
6651 | } |
6652 | |
6653 | return 0; |
6654 | } |
6655 | |
6656 | static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = { |
6657 | [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 }, |
6658 | [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, |
6659 | rtnl_validate_mdb_entry, |
6660 | sizeof(struct br_mdb_entry)), |
6661 | [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED }, |
6662 | }; |
6663 | |
6664 | static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, |
6665 | struct netlink_ext_ack *extack) |
6666 | { |
6667 | struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; |
6668 | struct net *net = sock_net(sk: skb->sk); |
6669 | struct br_port_msg *bpm; |
6670 | struct net_device *dev; |
6671 | int err; |
6672 | |
6673 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*bpm), tb, |
6674 | MDBA_SET_ENTRY_MAX, policy: mdba_policy, extack); |
6675 | if (err) |
6676 | return err; |
6677 | |
6678 | bpm = nlmsg_data(nlh); |
6679 | if (!bpm->ifindex) { |
6680 | NL_SET_ERR_MSG(extack, "Invalid ifindex" ); |
6681 | return -EINVAL; |
6682 | } |
6683 | |
6684 | dev = __dev_get_by_index(net, ifindex: bpm->ifindex); |
6685 | if (!dev) { |
6686 | NL_SET_ERR_MSG(extack, "Device doesn't exist" ); |
6687 | return -ENODEV; |
6688 | } |
6689 | |
6690 | if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) { |
6691 | NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute" ); |
6692 | return -EINVAL; |
6693 | } |
6694 | |
6695 | if (!dev->netdev_ops->ndo_mdb_add) { |
6696 | NL_SET_ERR_MSG(extack, "Device does not support MDB operations" ); |
6697 | return -EOPNOTSUPP; |
6698 | } |
6699 | |
6700 | return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack); |
6701 | } |
6702 | |
6703 | static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr, |
6704 | struct netlink_ext_ack *extack) |
6705 | { |
6706 | struct br_mdb_entry *entry = nla_data(nla: attr); |
6707 | struct br_mdb_entry zero_entry = {}; |
6708 | |
6709 | if (nla_len(nla: attr) != sizeof(struct br_mdb_entry)) { |
6710 | NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length" ); |
6711 | return -EINVAL; |
6712 | } |
6713 | |
6714 | if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) { |
6715 | NL_SET_ERR_MSG(extack, "Unknown entry state" ); |
6716 | return -EINVAL; |
6717 | } |
6718 | |
6719 | if (entry->flags) { |
6720 | NL_SET_ERR_MSG(extack, "Entry flags cannot be set" ); |
6721 | return -EINVAL; |
6722 | } |
6723 | |
6724 | if (entry->vid >= VLAN_N_VID - 1) { |
6725 | NL_SET_ERR_MSG(extack, "Invalid entry VLAN id" ); |
6726 | return -EINVAL; |
6727 | } |
6728 | |
6729 | if (memcmp(p: &entry->addr, q: &zero_entry.addr, size: sizeof(entry->addr))) { |
6730 | NL_SET_ERR_MSG(extack, "Entry address cannot be set" ); |
6731 | return -EINVAL; |
6732 | } |
6733 | |
6734 | return 0; |
6735 | } |
6736 | |
6737 | static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = { |
6738 | [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, |
6739 | rtnl_validate_mdb_entry_del_bulk, |
6740 | sizeof(struct br_mdb_entry)), |
6741 | [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED }, |
6742 | }; |
6743 | |
6744 | static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, |
6745 | struct netlink_ext_ack *extack) |
6746 | { |
6747 | bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); |
6748 | struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; |
6749 | struct net *net = sock_net(sk: skb->sk); |
6750 | struct br_port_msg *bpm; |
6751 | struct net_device *dev; |
6752 | int err; |
6753 | |
6754 | if (!del_bulk) |
6755 | err = nlmsg_parse_deprecated(nlh, hdrlen: sizeof(*bpm), tb, |
6756 | MDBA_SET_ENTRY_MAX, policy: mdba_policy, |
6757 | extack); |
6758 | else |
6759 | err = nlmsg_parse(nlh, hdrlen: sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, |
6760 | policy: mdba_del_bulk_policy, extack); |
6761 | if (err) |
6762 | return err; |
6763 | |
6764 | bpm = nlmsg_data(nlh); |
6765 | if (!bpm->ifindex) { |
6766 | NL_SET_ERR_MSG(extack, "Invalid ifindex" ); |
6767 | return -EINVAL; |
6768 | } |
6769 | |
6770 | dev = __dev_get_by_index(net, ifindex: bpm->ifindex); |
6771 | if (!dev) { |
6772 | NL_SET_ERR_MSG(extack, "Device doesn't exist" ); |
6773 | return -ENODEV; |
6774 | } |
6775 | |
6776 | if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) { |
6777 | NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute" ); |
6778 | return -EINVAL; |
6779 | } |
6780 | |
6781 | if (del_bulk) { |
6782 | if (!dev->netdev_ops->ndo_mdb_del_bulk) { |
6783 | NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion" ); |
6784 | return -EOPNOTSUPP; |
6785 | } |
6786 | return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack); |
6787 | } |
6788 | |
6789 | if (!dev->netdev_ops->ndo_mdb_del) { |
6790 | NL_SET_ERR_MSG(extack, "Device does not support MDB operations" ); |
6791 | return -EOPNOTSUPP; |
6792 | } |
6793 | |
6794 | return dev->netdev_ops->ndo_mdb_del(dev, tb, extack); |
6795 | } |
6796 | |
6797 | /* Process one rtnetlink message. */ |
6798 | |
6799 | static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb) |
6800 | { |
6801 | const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED); |
6802 | rtnl_dumpit_func dumpit = cb->data; |
6803 | int err; |
6804 | |
6805 | /* Previous iteration have already finished, avoid calling->dumpit() |
6806 | * again, it may not expect to be called after it reached the end. |
6807 | */ |
6808 | if (!dumpit) |
6809 | return 0; |
6810 | |
6811 | if (needs_lock) |
6812 | rtnl_lock(); |
6813 | err = dumpit(skb, cb); |
6814 | if (needs_lock) |
6815 | rtnl_unlock(); |
6816 | |
6817 | /* Old dump handlers used to send NLM_DONE as in a separate recvmsg(). |
6818 | * Some applications which parse netlink manually depend on this. |
6819 | */ |
6820 | if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) { |
6821 | if (err < 0 && err != -EMSGSIZE) |
6822 | return err; |
6823 | if (!err) |
6824 | cb->data = NULL; |
6825 | |
6826 | return skb->len; |
6827 | } |
6828 | return err; |
6829 | } |
6830 | |
6831 | static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb, |
6832 | const struct nlmsghdr *nlh, |
6833 | struct netlink_dump_control *control) |
6834 | { |
6835 | if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE || |
6836 | !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) { |
6837 | WARN_ON(control->data); |
6838 | control->data = control->dump; |
6839 | control->dump = rtnl_dumpit; |
6840 | } |
6841 | |
6842 | return netlink_dump_start(ssk, skb, nlh, control); |
6843 | } |
6844 | |
6845 | static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, |
6846 | struct netlink_ext_ack *extack) |
6847 | { |
6848 | struct net *net = sock_net(sk: skb->sk); |
6849 | struct rtnl_link *link; |
6850 | enum rtnl_kinds kind; |
6851 | struct module *owner; |
6852 | int err = -EOPNOTSUPP; |
6853 | rtnl_doit_func doit; |
6854 | unsigned int flags; |
6855 | int family; |
6856 | int type; |
6857 | |
6858 | type = nlh->nlmsg_type; |
6859 | if (type > RTM_MAX) |
6860 | return -EOPNOTSUPP; |
6861 | |
6862 | type -= RTM_BASE; |
6863 | |
6864 | /* All the messages must have at least 1 byte length */ |
6865 | if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) |
6866 | return 0; |
6867 | |
6868 | family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; |
6869 | kind = rtnl_msgtype_kind(msgtype: type); |
6870 | |
6871 | if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN)) |
6872 | return -EPERM; |
6873 | |
6874 | rcu_read_lock(); |
6875 | if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) { |
6876 | struct sock *rtnl; |
6877 | rtnl_dumpit_func dumpit; |
6878 | u32 min_dump_alloc = 0; |
6879 | |
6880 | link = rtnl_get_link(protocol: family, msgtype: type); |
6881 | if (!link || !link->dumpit) { |
6882 | family = PF_UNSPEC; |
6883 | link = rtnl_get_link(protocol: family, msgtype: type); |
6884 | if (!link || !link->dumpit) |
6885 | goto err_unlock; |
6886 | } |
6887 | owner = link->owner; |
6888 | dumpit = link->dumpit; |
6889 | flags = link->flags; |
6890 | |
6891 | if (type == RTM_GETLINK - RTM_BASE) |
6892 | min_dump_alloc = rtnl_calcit(skb, nlh); |
6893 | |
6894 | err = 0; |
6895 | /* need to do this before rcu_read_unlock() */ |
6896 | if (!try_module_get(module: owner)) |
6897 | err = -EPROTONOSUPPORT; |
6898 | |
6899 | rcu_read_unlock(); |
6900 | |
6901 | rtnl = net->rtnl; |
6902 | if (err == 0) { |
6903 | struct netlink_dump_control c = { |
6904 | .dump = dumpit, |
6905 | .min_dump_alloc = min_dump_alloc, |
6906 | .module = owner, |
6907 | .flags = flags, |
6908 | }; |
6909 | err = rtnetlink_dump_start(ssk: rtnl, skb, nlh, control: &c); |
6910 | /* netlink_dump_start() will keep a reference on |
6911 | * module if dump is still in progress. |
6912 | */ |
6913 | module_put(module: owner); |
6914 | } |
6915 | return err; |
6916 | } |
6917 | |
6918 | link = rtnl_get_link(protocol: family, msgtype: type); |
6919 | if (!link || !link->doit) { |
6920 | family = PF_UNSPEC; |
6921 | link = rtnl_get_link(PF_UNSPEC, msgtype: type); |
6922 | if (!link || !link->doit) |
6923 | goto out_unlock; |
6924 | } |
6925 | |
6926 | owner = link->owner; |
6927 | if (!try_module_get(module: owner)) { |
6928 | err = -EPROTONOSUPPORT; |
6929 | goto out_unlock; |
6930 | } |
6931 | |
6932 | flags = link->flags; |
6933 | if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && |
6934 | !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { |
6935 | NL_SET_ERR_MSG(extack, "Bulk delete is not supported" ); |
6936 | module_put(module: owner); |
6937 | goto err_unlock; |
6938 | } |
6939 | |
6940 | if (flags & RTNL_FLAG_DOIT_UNLOCKED) { |
6941 | doit = link->doit; |
6942 | rcu_read_unlock(); |
6943 | if (doit) |
6944 | err = doit(skb, nlh, extack); |
6945 | module_put(module: owner); |
6946 | return err; |
6947 | } |
6948 | rcu_read_unlock(); |
6949 | |
6950 | rtnl_lock(); |
6951 | link = rtnl_get_link(protocol: family, msgtype: type); |
6952 | if (link && link->doit) |
6953 | err = link->doit(skb, nlh, extack); |
6954 | rtnl_unlock(); |
6955 | |
6956 | module_put(module: owner); |
6957 | |
6958 | return err; |
6959 | |
6960 | out_unlock: |
6961 | rcu_read_unlock(); |
6962 | return err; |
6963 | |
6964 | err_unlock: |
6965 | rcu_read_unlock(); |
6966 | return -EOPNOTSUPP; |
6967 | } |
6968 | |
6969 | static void rtnetlink_rcv(struct sk_buff *skb) |
6970 | { |
6971 | netlink_rcv_skb(skb, cb: &rtnetlink_rcv_msg); |
6972 | } |
6973 | |
6974 | static int rtnetlink_bind(struct net *net, int group) |
6975 | { |
6976 | switch (group) { |
6977 | case RTNLGRP_IPV4_MROUTE_R: |
6978 | case RTNLGRP_IPV6_MROUTE_R: |
6979 | if (!ns_capable(ns: net->user_ns, CAP_NET_ADMIN)) |
6980 | return -EPERM; |
6981 | break; |
6982 | } |
6983 | return 0; |
6984 | } |
6985 | |
6986 | static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) |
6987 | { |
6988 | struct net_device *dev = netdev_notifier_info_to_dev(info: ptr); |
6989 | |
6990 | switch (event) { |
6991 | case NETDEV_REBOOT: |
6992 | case NETDEV_CHANGEMTU: |
6993 | case NETDEV_CHANGEADDR: |
6994 | case NETDEV_CHANGENAME: |
6995 | case NETDEV_FEAT_CHANGE: |
6996 | case NETDEV_BONDING_FAILOVER: |
6997 | case NETDEV_POST_TYPE_CHANGE: |
6998 | case NETDEV_NOTIFY_PEERS: |
6999 | case NETDEV_CHANGEUPPER: |
7000 | case NETDEV_RESEND_IGMP: |
7001 | case NETDEV_CHANGEINFODATA: |
7002 | case NETDEV_CHANGELOWERSTATE: |
7003 | case NETDEV_CHANGE_TX_QUEUE_LEN: |
7004 | rtmsg_ifinfo_event(RTM_NEWLINK, dev, change: 0, event: rtnl_get_event(event), |
7005 | GFP_KERNEL, NULL, new_ifindex: 0, portid: 0, NULL); |
7006 | break; |
7007 | default: |
7008 | break; |
7009 | } |
7010 | return NOTIFY_DONE; |
7011 | } |
7012 | |
7013 | static struct notifier_block rtnetlink_dev_notifier = { |
7014 | .notifier_call = rtnetlink_event, |
7015 | }; |
7016 | |
7017 | |
7018 | static int __net_init rtnetlink_net_init(struct net *net) |
7019 | { |
7020 | struct sock *sk; |
7021 | struct netlink_kernel_cfg cfg = { |
7022 | .groups = RTNLGRP_MAX, |
7023 | .input = rtnetlink_rcv, |
7024 | .flags = NL_CFG_F_NONROOT_RECV, |
7025 | .bind = rtnetlink_bind, |
7026 | }; |
7027 | |
7028 | sk = netlink_kernel_create(net, NETLINK_ROUTE, cfg: &cfg); |
7029 | if (!sk) |
7030 | return -ENOMEM; |
7031 | net->rtnl = sk; |
7032 | return 0; |
7033 | } |
7034 | |
7035 | static void __net_exit rtnetlink_net_exit(struct net *net) |
7036 | { |
7037 | netlink_kernel_release(sk: net->rtnl); |
7038 | net->rtnl = NULL; |
7039 | } |
7040 | |
7041 | static struct pernet_operations rtnetlink_net_ops = { |
7042 | .init = rtnetlink_net_init, |
7043 | .exit = rtnetlink_net_exit, |
7044 | }; |
7045 | |
7046 | static const struct rtnl_msg_handler rtnetlink_rtnl_msg_handlers[] __initconst = { |
7047 | {.msgtype = RTM_NEWLINK, .doit = rtnl_newlink, |
7048 | .flags = RTNL_FLAG_DOIT_PERNET}, |
7049 | {.msgtype = RTM_DELLINK, .doit = rtnl_dellink, |
7050 | .flags = RTNL_FLAG_DOIT_PERNET_WIP}, |
7051 | {.msgtype = RTM_GETLINK, .doit = rtnl_getlink, |
7052 | .dumpit = rtnl_dump_ifinfo, .flags = RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, |
7053 | {.msgtype = RTM_SETLINK, .doit = rtnl_setlink, |
7054 | .flags = RTNL_FLAG_DOIT_PERNET_WIP}, |
7055 | {.msgtype = RTM_GETADDR, .dumpit = rtnl_dump_all}, |
7056 | {.msgtype = RTM_GETROUTE, .dumpit = rtnl_dump_all}, |
7057 | {.msgtype = RTM_GETNETCONF, .dumpit = rtnl_dump_all}, |
7058 | {.msgtype = RTM_GETSTATS, .doit = rtnl_stats_get, |
7059 | .dumpit = rtnl_stats_dump}, |
7060 | {.msgtype = RTM_SETSTATS, .doit = rtnl_stats_set}, |
7061 | {.msgtype = RTM_NEWLINKPROP, .doit = rtnl_newlinkprop}, |
7062 | {.msgtype = RTM_DELLINKPROP, .doit = rtnl_dellinkprop}, |
7063 | {.protocol = PF_BRIDGE, .msgtype = RTM_GETLINK, |
7064 | .dumpit = rtnl_bridge_getlink}, |
7065 | {.protocol = PF_BRIDGE, .msgtype = RTM_DELLINK, |
7066 | .doit = rtnl_bridge_dellink}, |
7067 | {.protocol = PF_BRIDGE, .msgtype = RTM_SETLINK, |
7068 | .doit = rtnl_bridge_setlink}, |
7069 | {.protocol = PF_BRIDGE, .msgtype = RTM_NEWNEIGH, .doit = rtnl_fdb_add}, |
7070 | {.protocol = PF_BRIDGE, .msgtype = RTM_DELNEIGH, .doit = rtnl_fdb_del, |
7071 | .flags = RTNL_FLAG_BULK_DEL_SUPPORTED}, |
7072 | {.protocol = PF_BRIDGE, .msgtype = RTM_GETNEIGH, .doit = rtnl_fdb_get, |
7073 | .dumpit = rtnl_fdb_dump}, |
7074 | {.protocol = PF_BRIDGE, .msgtype = RTM_NEWMDB, .doit = rtnl_mdb_add}, |
7075 | {.protocol = PF_BRIDGE, .msgtype = RTM_DELMDB, .doit = rtnl_mdb_del, |
7076 | .flags = RTNL_FLAG_BULK_DEL_SUPPORTED}, |
7077 | {.protocol = PF_BRIDGE, .msgtype = RTM_GETMDB, .doit = rtnl_mdb_get, |
7078 | .dumpit = rtnl_mdb_dump}, |
7079 | }; |
7080 | |
7081 | void __init rtnetlink_init(void) |
7082 | { |
7083 | if (register_pernet_subsys(&rtnetlink_net_ops)) |
7084 | panic(fmt: "rtnetlink_init: cannot initialize rtnetlink\n" ); |
7085 | |
7086 | register_netdevice_notifier(nb: &rtnetlink_dev_notifier); |
7087 | |
7088 | rtnl_register_many(rtnetlink_rtnl_msg_handlers); |
7089 | } |
7090 | |