// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool_netlink.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

#include "bitset.h"
#include "common.h"
#include "netlink.h"

const struct nla_policy ethnl_tunnel_info_get_policy[] = {
        [ETHTOOL_A_TUNNEL_INFO_HEADER]          =
                NLA_POLICY_NESTED(ethnl_header_policy),
};

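/* The ethtool UAPI tunnel type values are defined as the bit positions of
 * the corresponding UDP_TUNNEL_TYPE_* flags; the asserts below keep the two
 * sets of constants in sync.
 */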
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
              ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));

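/* Size one _UDP_TABLE nest: the nest header, the _UDP_TABLE_SIZE attribute
 * and the tunnel type bitset.  Space for the port entries themselves is
 * added separately by the caller.
 */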
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
        ssize_t size;

        size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
                                   udp_tunnel_type_names, compact);
        if (size < 0)
                return size;

        return size +
                nla_total_size(0) +             /* _UDP_TABLE */
                nla_total_size(sizeof(u32));    /* _UDP_TABLE_SIZE */
}

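/* Compute the reply size for a TUNNEL_INFO_GET request: one _UDP_TABLE nest
 * per populated udp_tunnel_nic table, including its current port entries,
 * plus a synthetic single-entry table for devices with a static IANA VXLAN
 * port.
 */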
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
                             struct netlink_ext_ack *extack)
{
        bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
        const struct udp_tunnel_nic_info *info;
        unsigned int i;
        ssize_t ret;
        size_t size;

        info = req_base->dev->udp_tunnel_nic_info;
        if (!info) {
                NL_SET_ERR_MSG(extack,
                               "device does not report tunnel offload info");
                return -EOPNOTSUPP;
        }

        size = nla_total_size(0); /* _INFO_UDP_PORTS */

        for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
                if (!info->tables[i].n_entries)
                        break;

                ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
                                                 compact);
                if (ret < 0)
                        return ret;
                size += ret;

                size += udp_tunnel_nic_dump_size(req_base->dev, i);
        }

        if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
                ret = ethnl_udp_table_reply_size(0, compact);
                if (ret < 0)
                        return ret;
                size += ret;

                size += nla_total_size(0) +              /* _TABLE_ENTRY */
                        nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
                        nla_total_size(sizeof(u32));     /* _ENTRY_TYPE */
        }

        return size;
}

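/* Fill the _INFO_UDP_PORTS nest of the reply: one _UDP_TABLE nest per
 * populated table, each carrying the table size, the supported tunnel type
 * bitset and the ports currently programmed into the device.
 */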
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
                             struct sk_buff *skb)
{
        bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
        const struct udp_tunnel_nic_info *info;
        struct nlattr *ports, *table, *entry;
        unsigned int i;

        info = req_base->dev->udp_tunnel_nic_info;
        if (!info)
                return -EOPNOTSUPP;

        ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
        if (!ports)
                return -EMSGSIZE;

        for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
                if (!info->tables[i].n_entries)
                        break;

                table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
                if (!table)
                        goto err_cancel_ports;

                if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
                                info->tables[i].n_entries))
                        goto err_cancel_table;

                if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
                                       &info->tables[i].tunnel_types, NULL,
                                       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
                                       udp_tunnel_type_names, compact))
                        goto err_cancel_table;

                if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
                        goto err_cancel_table;

                nla_nest_end(skb, table);
        }

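        /* A device with a static IANA VXLAN entry reports it here as an
         * extra single-entry table with an empty tunnel type bitset.
         */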
        if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
                u32 zero = 0;

                table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
                if (!table)
                        goto err_cancel_ports;

                if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
                        goto err_cancel_table;

                if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
                                       &zero, NULL,
                                       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
                                       udp_tunnel_type_names, compact))
                        goto err_cancel_table;

                entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
                if (!entry)
                        goto err_cancel_table;

                if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
                                 htons(IANA_VXLAN_UDP_PORT)) ||
                    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
                                ilog2(UDP_TUNNEL_TYPE_VXLAN)))
                        goto err_cancel_entry;

                nla_nest_end(skb, entry);
                nla_nest_end(skb, table);
        }

        nla_nest_end(skb, ports);

        return 0;

err_cancel_entry:
        nla_nest_cancel(skb, entry);
err_cancel_table:
        nla_nest_cancel(skb, table);
err_cancel_ports:
        nla_nest_cancel(skb, ports);
        return -EMSGSIZE;
}

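/* Handle a single TUNNEL_INFO_GET request: resolve the target device from
 * the request header, then size, allocate and fill the reply under RTNL.
 */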
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct ethnl_req_info req_info = {};
        struct nlattr **tb = info->attrs;
        struct sk_buff *rskb;
        void *reply_payload;
        int reply_len;
        int ret;

        ret = ethnl_parse_header_dev_get(&req_info,
                                         tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
                                         genl_info_net(info), info->extack,
                                         true);
        if (ret < 0)
                return ret;

        rtnl_lock();
        ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
        if (ret < 0)
                goto err_unlock_rtnl;
        reply_len = ret + ethnl_reply_header_size();

        rskb = ethnl_reply_init(reply_len, req_info.dev,
                                ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
                                ETHTOOL_A_TUNNEL_INFO_HEADER,
                                info, &reply_payload);
        if (!rskb) {
                ret = -ENOMEM;
                goto err_unlock_rtnl;
        }

        ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
        if (ret)
                goto err_free_msg;
        rtnl_unlock();
        ethnl_parse_header_dev_put(&req_info);
        genlmsg_end(rskb, reply_payload);

        return genlmsg_reply(rskb, info);

err_free_msg:
        nlmsg_free(rskb);
err_unlock_rtnl:
        rtnl_unlock();
        ethnl_parse_header_dev_put(&req_info);
        return ret;
}

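/* Dump state kept in cb->ctx: the parsed request info and the ifindex to
 * resume the device walk from on the next dumpit call.
 */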
struct ethnl_tunnel_info_dump_ctx {
        struct ethnl_req_info   req_info;
        unsigned long           ifindex;
};

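/* Dump start callback: parse and validate the request header, but drop any
 * device reference right away since the dumpit callback walks all devices
 * itself.
 */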
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
        const struct genl_dumpit_info *info = genl_dumpit_info(cb);
        struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
        struct nlattr **tb = info->info.attrs;
        int ret;

        BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

        memset(ctx, 0, sizeof(*ctx));

        ret = ethnl_parse_header_dev_get(&ctx->req_info,
                                         tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
                                         sock_net(cb->skb->sk), cb->extack,
                                         false);
        if (ctx->req_info.dev) {
                ethnl_parse_header_dev_put(&ctx->req_info);
                ctx->req_info.dev = NULL;
        }

        return ret;
}

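/* Emit one TUNNEL_INFO_GET_REPLY message per device under RTNL.  Devices
 * without UDP tunnel offload info (-EOPNOTSUPP) are skipped; when the skb
 * fills up (-EMSGSIZE) the dump resumes later from ctx->ifindex.
 */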
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
        int ret = 0;
        void *ehdr;

        rtnl_lock();
        for_each_netdev_dump(net, dev, ctx->ifindex) {
                ehdr = ethnl_dump_put(skb, cb,
                                      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
                if (!ehdr) {
                        ret = -EMSGSIZE;
                        break;
                }

                ret = ethnl_fill_reply_header(skb, dev,
                                              ETHTOOL_A_TUNNEL_INFO_HEADER);
                if (ret < 0) {
                        genlmsg_cancel(skb, ehdr);
                        break;
                }

                ctx->req_info.dev = dev;
                ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
                ctx->req_info.dev = NULL;
                if (ret < 0) {
                        genlmsg_cancel(skb, ehdr);
                        if (ret == -EOPNOTSUPP)
                                continue;
                        break;
                }
                genlmsg_end(skb, ehdr);
        }
        rtnl_unlock();

        if (ret == -EMSGSIZE && skb->len)
                return skb->len;
        return ret;
}

/* source code of linux/net/ethtool/tunnels.c */