/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>
#include <net/xdp.h>

/* 0 - Reserved to indicate value not set
 * 1..NR_CPUS - Reserved for sender_cpu
 * NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
	return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);
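
/*
 * Illustrative sketch of a napi_busy_loop() caller (my_loop_end() and
 * my_data_ready() are made-up names, not part of this header): spin on
 * the NAPI context identified by napi_id, polling up to budget packets
 * per pass, until the callback says to stop:
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		return my_data_ready(arg) || busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, my_loop_end, arg, false, BUSY_POLL_BUDGET);
 */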

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* local_clock() returns nanoseconds; >> 10 is a cheap approximate
	 * division by 1000, giving roughly microsecond resolution.
	 */
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}
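
/*
 * Sketch of the poll/select-style spin this is meant for
 * (work_pending() is a made-up placeholder for the caller's own
 * condition): record the start time once, then bound the spin with the
 * global timeout:
 *
 *	unsigned long start_time = busy_loop_current_time();
 *
 *	while (!work_pending()) {
 *		if (!net_busy_loop_on() || busy_loop_timeout(start_time))
 *			break;
 *		cpu_relax();
 *	}
 */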

static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll),
			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
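
/*
 * Sketch of typical use from a protocol's receive path (flags is the
 * caller's recvmsg flags word, not defined here): busy poll only when
 * the socket allows it and nothing is queued yet, before falling back
 * to sleeping:
 *
 *	if (sk_can_busy_loop(sk) &&
 *	    skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 */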

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid overwriting
	 * it.
	 */
	if (skb->napi_id < MIN_NAPI_ID)
		skb->napi_id = napi->napi_id;
#endif
}
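
/*
 * Sketch of a driver Rx path (rx_ring is a hypothetical driver
 * structure): mark each received skb with the servicing NAPI context
 * before handing it up the stack:
 *
 *	skb_mark_napi_id(skb, &rx_ring->napi);
 *	netif_receive_skb(skb);
 */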

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}
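
/*
 * Sketch: after a protocol handler has looked up the receiving socket
 * for an skb, it propagates the skb's NAPI ID so a later recvmsg() can
 * busy poll the right queue (my_proto_lookup() is a made-up name):
 *
 *	sk = my_proto_lookup(skb);
 *	if (sk)
 *		sk_mark_napi_id(sk, skb);
 */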

/* Variant of sk_mark_napi_id() for passive flow setup,
 * as both sk->sk_napi_id and sk->sk_rx_queue_mapping
 * need to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}
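
/*
 * Sketch for an unconnected (e.g. UDP-style) receive path: skbs for
 * one socket may arrive via several queues, so only the first one's
 * NAPI ID is recorded:
 *
 *	sk_mark_napi_id_once(sk, skb);
 */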

static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
					    const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}
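
/*
 * Sketch for an XDP/AF_XDP receive path, where there is an xdp_buff
 * rather than an skb; the NAPI ID comes from the Rx queue the buffer
 * arrived on:
 *
 *	sk_mark_napi_id_once_xdp(sk, xdp);
 */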

#endif /* _LINUX_NET_BUSY_POLL_H */