// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, Microsoft Corporation.
 *
 * Author:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
#include <net/xdp.h>

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

#include "hyperv_net.h"

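/* Run the channel's XDP program, if one is attached, on a received frame.
 * The frame data is first copied into a freshly allocated page so the
 * program gets writable memory with NETVSC_XDP_HDRM bytes of headroom,
 * instead of running against the receive buffer in place. Returns the
 * XDP action taken (XDP_PASS when no program is attached).
 */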
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
		   struct xdp_buff *xdp)
{
	struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
	void *data = nvchan->rsc.data[0];
	u32 len = nvchan->rsc.len[0];
	struct page *page = NULL;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;
	bool drop = true;

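	/* data_hard_start is cleared here and again on the drop path below,
	 * leaving a non-NULL value only when a private page remains attached
	 * for the caller.
	 */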
	xdp->data_hard_start = NULL;

	rcu_read_lock();
	prog = rcu_dereference(nvchan->bpf_prog);

	if (!prog)
		goto out;

	/* Ensure that the below memcpy() won't overflow the page buffer. */
	if (len > ndev->mtu + ETH_HLEN) {
		act = XDP_DROP;
		goto out;
	}

	/* allocate page buffer for data */
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		act = XDP_DROP;
		goto out;
	}

	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);

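	/* Copy the frame out of the receive buffer; the program runs on
	 * this private copy.
	 */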
	memcpy(xdp->data, data, len);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
		drop = false;
		break;

	case XDP_DROP:
		break;

	case XDP_REDIRECT:
		if (!xdp_do_redirect(ndev, xdp, prog)) {
			nvchan->xdp_flush = true;
			drop = false;

			u64_stats_update_begin(&rx_stats->syncp);

			rx_stats->xdp_redirect++;
			rx_stats->packets++;
			rx_stats->bytes += nvchan->rsc.pktlen;

			u64_stats_update_end(&rx_stats->syncp);

			break;
		} else {
			u64_stats_update_begin(&rx_stats->syncp);
			rx_stats->xdp_drop++;
			u64_stats_update_end(&rx_stats->syncp);
		}

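		/* Redirect failed: fall through to count an XDP exception.
		 * 'drop' stays true, so the private page is freed below.
		 */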
		fallthrough;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

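	/* Release the private copy unless the frame was passed up the stack,
	 * queued for XDP_TX, or successfully redirected; clearing
	 * data_hard_start marks the xdp_buff as having no attached page.
	 */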
	if (page && drop) {
		__free_page(page);
		xdp->data_hard_start = NULL;
	}

	return act;
}

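/* Buffer space that 'len' bytes of frame data occupy once the tail room
 * for struct skb_shared_info is added; netvsc_xdp_set() uses this to
 * check that MTU-sized frames still fit in one page.
 */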
unsigned int netvsc_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

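/* Every channel carries the same program (see netvsc_xdp_set()), so
 * channel 0 is representative. Requires the RTNL lock.
 */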
struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
{
	return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
}

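/* Attach 'prog' to every channel, or detach when 'prog' is NULL. One
 * extra reference per additional channel is taken up front (the caller's
 * reference covers channel 0); the old program gives up one reference
 * per channel.
 */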
int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack,
		   struct netvsc_device *nvdev)
{
	struct bpf_prog *old_prog;
	int buf_max, i;

	old_prog = netvsc_xdp_get(nvdev);

	if (!old_prog && !prog)
		return 0;

	buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
			   dev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	if (prog && (dev->features & NETIF_F_LRO)) {
		netdev_err(dev, "XDP: not support LRO\n");
		NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO");

		return -EOPNOTSUPP;
	}

	if (prog)
		bpf_prog_add(prog, nvdev->num_chn - 1);

	for (i = 0; i < nvdev->num_chn; i++)
		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);

	if (old_prog)
		for (i = 0; i < nvdev->num_chn; i++)
			bpf_prog_put(old_prog);

	return 0;
}

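/* Propagate the XDP program to the companion VF device, if one is
 * present and implements ndo_bpf. An extra reference is taken for the
 * VF and dropped again if the setup call fails. Requires the RTNL lock.
 */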
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	int ret;

	ASSERT_RTNL();

	if (!vf_netdev)
		return 0;

	if (!vf_netdev->netdev_ops->ndo_bpf)
		return 0;

	memset(&xdp, 0, sizeof(xdp));

	if (prog)
		bpf_prog_inc(prog);

	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;

	ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);

	if (ret && prog)
		bpf_prog_put(prog);

	return ret;
}

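/* ndo_bpf handler: install the program on the synthetic device first,
 * then mirror it onto the VF; if the VF rejects it, roll the synthetic
 * device back to having no program.
 */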
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct net_device_context *ndevctx = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netlink_ext_ack *extack = bpf->extack;
	int ret;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);

		if (ret)
			return ret;

		ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);

		if (ret) {
			netdev_err(dev, "vf_setxdp failed:%d\n", ret);
			NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");

			netvsc_xdp_set(dev, NULL, extack, nvdev);
		}

		return ret;

	default:
		return -EINVAL;
	}
}

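/* Transmit one redirected frame on queue q_idx by converting it to an
 * skb and handing it to the netvsc transmit path.
 */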
static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
				 struct xdp_frame *frame, u16 q_idx)
{
	struct sk_buff *skb;

	skb = xdp_build_skb_from_frame(frame, ndev);
	if (unlikely(!skb))
		return -ENOMEM;

	netvsc_get_hash(skb, netdev_priv(ndev));

	skb_record_rx_queue(skb, q_idx);

	netvsc_xdp_xmit(skb, ndev);

	return 0;
}

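/* ndo_xdp_xmit handler: transmit up to 'n' redirected frames, handing
 * them to the VF datapath when it is active. Returns the number of
 * frames accepted.
 */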
int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
		       struct xdp_frame **frames, u32 flags)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	const struct net_device_ops *vf_ops;
	struct netvsc_stats_tx *tx_stats;
	struct netvsc_device *nvsc_dev;
	struct net_device *vf_netdev;
	int i, count = 0;
	u16 q_idx;

	/* Don't transmit if netvsc_device is gone */
	nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
	if (unlikely(!nvsc_dev || nvsc_dev->destroy))
		return 0;

	/* If VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then the VF cannot be used either.
	 */
	vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
	    vf_netdev->netdev_ops->ndo_xdp_xmit &&
	    ndev_ctx->data_path_is_vf) {
		vf_ops = vf_netdev->netdev_ops;
		return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
	}

	q_idx = smp_processor_id() % ndev->real_num_tx_queues;

	for (i = 0; i < n; i++) {
		if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
			break;

		count++;
	}

	tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->xdp_xmit += count;
	u64_stats_update_end(&tx_stats->syncp);

	return count;
}