/* SPDX-License-Identifier: GPL-2.0-or-later */
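/* include/net/netdev_lock.h - helpers for the per-netdevice instance lock
 * (dev->lock) and for deciding whether a device's ops run under that lock
 * or under the RTNL.
 */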

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

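/* Try-lock variant of netdev_lock(); returns true if dev->lock was taken. */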
static inline bool netdev_trylock(struct net_device *dev)
{
	return mutex_trylock(&dev->lock);
}

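/* Lockdep assertion that the instance lock of @dev is held. */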
static inline void netdev_assert_locked(const struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

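/* As above, but only once the device is visible to the rest of the stack
 * (registered or in the process of unregistering).
 */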
static inline void
netdev_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_assert_locked(dev);
}

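/* A device runs its ndo ops under the instance lock if it asked for it
 * explicitly (request_ops_lock), implements queue management ops or,
 * with CONFIG_NET_SHAPER, net shaper ops.
 */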
static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
	ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

	return ret;
}

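/* Take/release the instance lock around an ops call, but only for devices
 * that opted into ops locking; everything else stays protected by the RTNL
 * alone (see netdev_ops_assert_locked()).
 */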
static inline void netdev_lock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
}

static inline void netdev_unlock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

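/* Convert between the "ops locked" and "fully locked" states: devices using
 * the ops lock already hold dev->lock (only assert it), all others take or
 * drop dev->lock here.
 */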
static inline void netdev_lock_ops_to_full(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_assert_locked(dev);
	else
		netdev_lock(dev);
}

static inline void netdev_unlock_full_to_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_assert_locked(dev);
	else
		netdev_unlock(dev);
}

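/* Assert whichever lock protects the ops of @dev: dev->lock or the RTNL. */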
static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		lockdep_assert_held(&dev->lock);
	else
		ASSERT_RTNL();
}

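/* Ops-lock assertion, skipped while the device is invisible to the rest of
 * the system.
 */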
static inline void
netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_ops_assert_locked(dev);
}

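/* Compat variants for callers that need some lock either way: the instance
 * lock for ops-locked devices, rtnl_lock() for the rest.
 */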
static inline void netdev_lock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
	else
		rtnl_lock();
}

static inline void netdev_unlock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
	else
		rtnl_unlock();
}

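/* Nesting rules for dev->lock, hooked up via netdev_lockdep_set_classes(). */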
static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
				     const struct lockdep_map *b)
{
	if (a == b)
		return 0;

	/* Allow locking multiple devices only under rtnl_lock,
	 * the exact order doesn't matter.
	 * Note that upper devices don't lock their ops, so nesting
	 * mostly happens in batched device removal for now.
	 */
	return lockdep_rtnl_is_held() ? -1 : 1;
}

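/* Give each device its own lockdep classes for the qdisc busylock, the
 * address list lock, the instance lock and every TX queue's xmit lock.
 */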
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	static struct lock_class_key dev_instance_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	lockdep_set_class(&(dev)->lock,				\
			  &dev_instance_lock_key);		\
	lock_set_cmp_fn(&(dev)->lock, netdev_lock_cmp_fn, NULL);	\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

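/* RCU dereference for pointers whose updates are guarded by dev->lock. */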
#define netdev_lock_dereference(p, dev)				\
	rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock))

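/* Netdevice notifier callback used for netdev lock debugging. */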
int netdev_debug_event(struct notifier_block *nb, unsigned long event,
		       void *ptr);

#endif /* _NET_NETDEV_LOCK_H */
