/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address. Each node contains long-lived
 * information about the peer which doesn't depend on routes.
 *
 * Nodes are removed only when their reference counter goes to 0.
 * Once that has happened, a node may be removed after a sufficient amount of
 * time has passed since its last use. A less-recently-used entry can also be
 * removed if the pool is overloaded, i.e. if the total number of entries is
 * greater than or equal to the threshold.
 *
 * The node pool is organised as an RB tree.
 * This implementation was not chosen just for fun: it prevents easy and
 * efficient DoS attacks based on creating hash collisions. A huge number of
 * long-lived nodes in a single hash slot would significantly delay lookups
 * performed with BHs disabled.
 *
 * Serialisation issues.
 * 1. Nodes may appear in the tree only with the pool lock held.
 * 2. Nodes may disappear from the tree only with the pool lock held
 *    AND the reference count being 0.
 * 3. The per-base counter base->total is modified under the pool lock.
 * 4. struct inet_peer fields modification:
 *	rb_node: pool lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent the node disappearing
 *	daddr: unchangeable
 */
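
/*
 * A minimal usage sketch of the API below. The inetpeer_addr field names
 * follow include/net/inetpeer.h; the call site and the address source are
 * illustrative, not taken from real code:
 *
 *	struct inetpeer_addr daddr;
 *	struct inet_peer *peer;
 *
 *	daddr.a4.addr = some_ipv4_address;	// hypothetical address
 *	daddr.a4.vif = 0;
 *	daddr.family = AF_INET;
 *	peer = inet_getpeer(base, &daddr, 1);	// create if not found
 *	if (peer) {
 *		// read/update peer->rate_tokens, peer->metrics, ...
 *		inet_putpeer(peer);		// drop the caller's reference
 *	}
 */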

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
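	/* Worked example for the sizing above (numbers are illustrative): on a
	 * 4 GiB machine with a cache-aligned struct inet_peer of, say, 192
	 * bytes, nr_entries is (4 << 30) / (100 * 192), roughly 220k, so the
	 * threshold is capped at 65536 + 128; very small machines bottom out
	 * at 4096.
	 */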

	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
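		/* On the write-locked lookup (gc_stack != NULL), remember the
		 * nodes walked over so that inet_peer_gc() can reap the stale
		 * ones afterwards; on the lockless lookup, give up if a
		 * writer slipped in.
		 */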
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

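	/* ttl scales linearly from maxttl (empty pool) down to minttl as the
	 * pool approaches the threshold; dividing by HZ before multiplying by
	 * base->total presumably keeps the intermediate product from
	 * overflowing. E.g. with the default 600/120 second ttls above, a
	 * pool at half the threshold yields a ttl of about 360 seconds.
	 */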
	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
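			/* One reference for the tree, one for the caller. */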
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 * Check transmit rate limitation for a given message.
 * The rate information is held in the inet_peer entries now.
 * This function is generic and could be used for other purposes
 * too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 * Note that the same inet_peer fields are modified by functions in
 * route.c too, but these work for packet destinations while xrlim_allow
 * works for icmp destinations. This means the rate limiting information
 * for one "ip object" is shared - and these ICMPs are twice limited:
 * by source and by destination.
 *
 * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			SHOULD allow setting of rate limits
 *
 * Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
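/*
 * Worked example (the numbers are illustrative): with timeout = HZ, a peer
 * that has been silent for a while holds the maximum of 6 * HZ tokens, so up
 * to six messages may be sent back to back; after that, tokens accumulate one
 * per elapsed jiffy and roughly one message per second is allowed.
 */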
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);