1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * net busy poll support |
4 | * Copyright(c) 2013 Intel Corporation. |
5 | * |
6 | * Author: Eliezer Tamir |
7 | * |
8 | * Contact Information: |
9 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> |
10 | */ |
11 | |
12 | #ifndef _LINUX_NET_BUSY_POLL_H |
13 | #define _LINUX_NET_BUSY_POLL_H |
14 | |
15 | #include <linux/netdevice.h> |
16 | #include <linux/sched/clock.h> |
17 | #include <linux/sched/signal.h> |
18 | #include <net/ip.h> |
19 | #include <net/xdp.h> |
20 | |
21 | /* 0 - Reserved to indicate value not set |
22 | * 1..NR_CPUS - Reserved for sender_cpu |
23 | * NR_CPUS+1..~0 - Region available for NAPI IDs |
24 | */ |
25 | #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1)) |
26 | |
27 | #define BUSY_POLL_BUDGET 8 |
28 | |
29 | #ifdef CONFIG_NET_RX_BUSY_POLL |
30 | |
31 | struct napi_struct; |
32 | extern unsigned int sysctl_net_busy_read __read_mostly; |
33 | extern unsigned int sysctl_net_busy_poll __read_mostly; |
34 | |
35 | static inline bool net_busy_loop_on(void) |
36 | { |
37 | return READ_ONCE(sysctl_net_busy_poll); |
38 | } |
39 | |
40 | static inline bool sk_can_busy_loop(const struct sock *sk) |
41 | { |
42 | return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current); |
43 | } |
44 | |
/* Default loop_end callback for socket busy polling: returns true once
 * polling should stop for socket @p given @start_time.
 */
bool sk_busy_loop_end(void *p, unsigned long start_time);

/* Busy poll the NAPI context identified by @napi_id, invoking
 * @loop_end(@loop_end_arg, start_time) to decide when to stop; a NULL
 * @loop_end polls a single round. @budget bounds per-iteration work.
 */
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);

/* Variant of napi_busy_loop() for callers already inside an RCU read
 * section.
 */
void napi_busy_loop_rcu(unsigned int napi_id,
			bool (*loop_end)(void *, unsigned long),
			void *loop_end_arg, bool prefer_busy_poll, u16 budget);
54 | |
55 | #else /* CONFIG_NET_RX_BUSY_POLL */ |
56 | static inline unsigned long net_busy_loop_on(void) |
57 | { |
58 | return 0; |
59 | } |
60 | |
61 | static inline bool sk_can_busy_loop(struct sock *sk) |
62 | { |
63 | return false; |
64 | } |
65 | |
66 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
67 | |
/* Cheap approximate timestamp for busy-poll timeout checks.
 * local_clock() is in nanoseconds; >> 10 divides by 1024, giving
 * roughly microseconds — precision is irrelevant here, only that the
 * units approximately match the usec-based sysctl/socket timeouts.
 * Returns 0 when busy polling is compiled out (timeouts then always
 * report expired).
 */
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}
76 | |
/* in poll/select we use the global sysctl_net_busy_poll value */
78 | static inline bool busy_loop_timeout(unsigned long start_time) |
79 | { |
80 | #ifdef CONFIG_NET_RX_BUSY_POLL |
81 | unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll); |
82 | |
83 | if (bp_usec) { |
84 | unsigned long end_time = start_time + bp_usec; |
85 | unsigned long now = busy_loop_current_time(); |
86 | |
87 | return time_after(now, end_time); |
88 | } |
89 | #endif |
90 | return true; |
91 | } |
92 | |
93 | static inline bool sk_busy_loop_timeout(struct sock *sk, |
94 | unsigned long start_time) |
95 | { |
96 | #ifdef CONFIG_NET_RX_BUSY_POLL |
97 | unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec); |
98 | |
99 | if (bp_usec) { |
100 | unsigned long end_time = start_time + bp_usec; |
101 | unsigned long now = busy_loop_current_time(); |
102 | |
103 | return time_after(now, end_time); |
104 | } |
105 | #endif |
106 | return true; |
107 | } |
108 | |
/* Busy poll the NAPI context recorded on @sk, if any.
 * @nonblock: non-zero for a non-blocking caller — then poll a single
 *            round (NULL loop_end) instead of looping until
 *            sk_busy_loop_end() reports the socket is ready or timed out.
 *
 * No-op unless the socket carries a valid NAPI ID (>= MIN_NAPI_ID).
 * Note: the original text contained editor inlay-hint artifacts
 * ("loop_end:", "loop_end_arg:") which are not valid C; they are
 * removed here.
 */
static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll),
			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
120 | |
/* Called from the NIC receive path: stamp @skb with the NAPI ID of the
 * context it arrived on, unless an earlier valid mark is already
 * present (IDs below MIN_NAPI_ID are not valid NAPI IDs).
 */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (skb->napi_id >= MIN_NAPI_ID)
		return;		/* keep the existing valid mark */

	skb->napi_id = napi->napi_id;
#endif
}
133 | |
/* used in the protocol handler to propagate the napi_id to the socket */
/* Propagate the NAPI ID carried by @skb to the socket, writing
 * sk_napi_id only when it actually changes to avoid dirtying the cache
 * line on every packet. Always refreshes the RX queue mapping.
 */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = skb->napi_id;

	if (unlikely(READ_ONCE(sk->sk_napi_id) != napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}
143 | |
144 | /* Variant of sk_mark_napi_id() for passive flow setup, |
145 | * as sk->sk_napi_id and sk->sk_rx_queue_mapping content |
146 | * needs to be set. |
147 | */ |
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Unconditional write: passive setup always takes the skb's ID. */
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	/* Unlike sk_mark_napi_id(), this sets (not updates) the RX queue. */
	sk_rx_queue_set(sk, skb);
}
156 | |
/* Record @napi_id on the socket only if no ID has been set yet; the
 * first mark wins and later calls are no-ops.
 */
static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (READ_ONCE(sk->sk_napi_id))
		return;

	WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}
164 | |
/* Variant used for unconnected sockets: record the skb's NAPI ID on
 * the socket only if none has been set yet.
 * Note: the original text contained an editor inlay-hint artifact
 * ("napi_id:") which is not valid C; it is removed here.
 */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}
173 | |
/* XDP counterpart of sk_mark_napi_id_once(): take the NAPI ID from the
 * xdp_buff's RX queue info instead of an skb.
 * Note: the original text contained an editor inlay-hint artifact
 * ("napi_id:") which is not valid C; it is removed here.
 */
static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
					    const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}
181 | |
182 | #endif /* _LINUX_NET_BUSY_POLL_H */ |
183 | |