// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS server record management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
#include "protocol_yfs.h"

static unsigned afs_server_gc_delay = 10;	/* Server record timeout in seconds */
static atomic_t afs_server_debug_id;

static struct afs_server *afs_maybe_use_server(struct afs_server *,
					       enum afs_server_trace);
static void __afs_put_server(struct afs_net *, struct afs_server *);

/*
 * Find a server by one of its addresses.
 */
struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer *peer)
{
	const struct afs_endpoint_state *estate;
	const struct afs_addr_list *alist;
	struct afs_server *server = NULL;
	unsigned int i;
	int seq = 1;

	rcu_read_lock();

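	/* Walk the address reverse-map list under the RCU read lock, retrying
	 * under the seqlock if the list changes under us.
	 */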
	do {
		if (server)
			afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
		server = NULL;
		seq++; /* 2 on the 1st/lockless path, otherwise odd */
		read_seqbegin_or_lock(&net->fs_addr_lock, &seq);

		hlist_for_each_entry_rcu(server, &net->fs_addresses, addr_link) {
			estate = rcu_dereference(server->endpoint_state);
			alist = estate->addresses;
			for (i = 0; i < alist->nr_addrs; i++)
				if (alist->addrs[i].peer == peer)
					goto found;
		}

		server = NULL;
		continue;
	found:
		server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);

	} while (need_seqretry(&net->fs_addr_lock, seq));

	done_seqretry(&net->fs_addr_lock, seq);

	rcu_read_unlock();
	return server;
}

/*
 * Look up a server by its UUID and mark it active.
 */
struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
{
	struct afs_server *server = NULL;
	struct rb_node *p;
	int diff, seq = 1;

	_enter("%pU", uuid);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (server)
			afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
		server = NULL;
		seq++; /* 2 on the 1st/lockless path, otherwise odd */
		read_seqbegin_or_lock(&net->fs_lock, &seq);

		p = net->fs_servers.rb_node;
		while (p) {
			server = rb_entry(p, struct afs_server, uuid_rb);

			diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				afs_use_server(server, afs_server_trace_get_by_uuid);
				break;
			}

			server = NULL;
		}
	} while (need_seqretry(&net->fs_lock, seq));

	done_seqretry(&net->fs_lock, seq);

	_leave(" = %p", server);
	return server;
}

/*
 * Install a server record in the namespace tree. If there's a clash, we stick
 * it into a list anchored on whichever afs_server struct is actually in the
 * tree.
 */
static struct afs_server *afs_install_server(struct afs_cell *cell,
					     struct afs_server *candidate)
{
	const struct afs_endpoint_state *estate;
	const struct afs_addr_list *alist;
	struct afs_server *server, *next;
	struct afs_net *net = cell->net;
	struct rb_node **pp, *p;
	int diff;

	_enter("%p", candidate);

	write_seqlock(&net->fs_lock);

	/* Firstly install the server in the UUID lookup tree */
	pp = &net->fs_servers.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		_debug("- consider %p", p);
		server = rb_entry(p, struct afs_server, uuid_rb);
		diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (server->cell == cell)
				goto exists;

			/* We have the same UUID representing servers in
			 * different cells. Append the new server to the list.
			 */
			for (;;) {
				next = rcu_dereference_protected(
					server->uuid_next,
					lockdep_is_held(&net->fs_lock.lock));
				if (!next)
					break;
				server = next;
			}
			rcu_assign_pointer(server->uuid_next, candidate);
			candidate->uuid_prev = server;
			server = candidate;
			goto added_dup;
		}
	}

	server = candidate;
	rb_link_node(&server->uuid_rb, p, pp);
	rb_insert_color(&server->uuid_rb, &net->fs_servers);
	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);

added_dup:
	write_seqlock(&net->fs_addr_lock);
	estate = rcu_dereference_protected(server->endpoint_state,
					   lockdep_is_held(&net->fs_addr_lock.lock));
	alist = estate->addresses;

	/* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
	 * it in the IPv4 and/or IPv6 reverse-map lists.
	 *
	 * TODO: For speed we want to use something other than a flat list
	 * here; even sorting the list in terms of lowest address would help a
	 * bit, but anything we might want to do gets messy and memory
	 * intensive.
	 */
	if (alist->nr_addrs > 0)
		hlist_add_head_rcu(&server->addr_link, &net->fs_addresses);

	write_sequnlock(&net->fs_addr_lock);

exists:
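	/* Return a ref on whichever record we're handing back to the caller. */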
	afs_get_server(server, afs_server_trace_get_install);
	write_sequnlock(&net->fs_lock);
	return server;
}

/*
 * Allocate a new server record and mark it active.
 */
static struct afs_server *afs_alloc_server(struct afs_cell *cell,
					   const uuid_t *uuid,
					   struct afs_addr_list *alist)
{
	struct afs_endpoint_state *estate;
	struct afs_server *server;
	struct afs_net *net = cell->net;

	_enter("");

	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
	if (!server)
		goto enomem;

	estate = kzalloc(sizeof(struct afs_endpoint_state), GFP_KERNEL);
	if (!estate)
		goto enomem_server;

	refcount_set(&server->ref, 1);
	atomic_set(&server->active, 1);
	server->debug_id = atomic_inc_return(&afs_server_debug_id);
	server->addr_version = alist->version;
	server->uuid = *uuid;
	rwlock_init(&server->fs_lock);
	INIT_LIST_HEAD(&server->volumes);
	init_waitqueue_head(&server->probe_wq);
	INIT_LIST_HEAD(&server->probe_link);
	spin_lock_init(&server->probe_lock);
	server->cell = cell;
	server->rtt = UINT_MAX;
	server->service_id = FS_SERVICE;

	server->probe_counter = 1;
	server->probed_at = jiffies - LONG_MAX / 2;
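
	/* Set up an initial endpoint state to carry the address list we were
	 * given.
	 */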
	refcount_set(&estate->ref, 1);
	estate->addresses = alist;
	estate->server_id = server->debug_id;
	estate->probe_seq = 1;
	rcu_assign_pointer(server->endpoint_state, estate);

	afs_inc_servers_outstanding(net);
	trace_afs_server(server->debug_id, 1, 1, afs_server_trace_alloc);
	trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
			 afs_estate_trace_alloc_server);
	_leave(" = %p", server);
	return server;

enomem_server:
	kfree(server);
enomem:
	_leave(" = NULL [nomem]");
	return NULL;
}

/*
 * Look up an address record for a server
 */
static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
						 struct key *key, const uuid_t *uuid)
{
	struct afs_vl_cursor vc;
	struct afs_addr_list *alist = NULL;
	int ret;

	ret = -ERESTARTSYS;
	if (afs_begin_vlserver_operation(&vc, cell, key)) {
		while (afs_select_vlserver(&vc)) {
			if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
				alist = afs_yfsvl_get_endpoints(&vc, uuid);
			else
				alist = afs_vl_get_addrs_u(&vc, uuid);
		}

		ret = afs_end_vlserver_operation(&vc);
	}

	return ret < 0 ? ERR_PTR(ret) : alist;
}

/*
 * Get or create a fileserver record.
 */
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
				     const uuid_t *uuid, u32 addr_version)
{
	struct afs_addr_list *alist;
	struct afs_server *server, *candidate;

	_enter("%p,%pU", cell->net, uuid);

	server = afs_find_server_by_uuid(cell->net, uuid);
	if (server) {
		if (server->addr_version != addr_version)
			set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
		return server;
	}

	alist = afs_vl_lookup_addrs(cell, key, uuid);
	if (IS_ERR(alist))
		return ERR_CAST(alist);

	candidate = afs_alloc_server(cell, uuid, alist);
	if (!candidate) {
		afs_put_addrlist(alist, afs_alist_trace_put_server_oom);
		return ERR_PTR(-ENOMEM);
	}

	server = afs_install_server(cell, candidate);
	if (server != candidate) {
		afs_put_addrlist(alist, afs_alist_trace_put_server_dup);
		kfree(candidate);
	} else {
		/* Immediately dispatch an asynchronous probe to each interface
		 * on the fileserver. This will make sure the repeat-probing
		 * service is started.
		 */
		afs_fs_probe_fileserver(cell->net, server, alist, key);
	}

	return server;
}

/*
 * Set the server timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_server_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		afs_inc_servers_outstanding(net);
		if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
			afs_dec_servers_outstanding(net);
	}
}

/*
 * Server management timer. We have an increment on fs_outstanding that we
 * need to pass along to the work item.
 */
void afs_servers_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Get a reference on a server object.
 */
struct afs_server *afs_get_server(struct afs_server *server,
				  enum afs_server_trace reason)
{
	unsigned int a;
	int r;

	__refcount_inc(&server->ref, &r);
	a = atomic_read(&server->active);
	trace_afs_server(server->debug_id, r + 1, a, reason);
	return server;
}

/*
 * Try to get a reference on a server object.
 */
static struct afs_server *afs_maybe_use_server(struct afs_server *server,
					       enum afs_server_trace reason)
{
	unsigned int a;
	int r;

	if (!__refcount_inc_not_zero(&server->ref, &r))
		return NULL;

	a = atomic_inc_return(&server->active);
	trace_afs_server(server->debug_id, r + 1, a, reason);
	return server;
}

/*
 * Get an active count on a server object.
 */
struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
{
	unsigned int a;
	int r;

	__refcount_inc(&server->ref, &r);
	a = atomic_inc_return(&server->active);

	trace_afs_server(server->debug_id, r + 1, a, reason);
	return server;
}

/*
 * Release a reference on a server record.
 */
void afs_put_server(struct afs_net *net, struct afs_server *server,
		    enum afs_server_trace reason)
{
	unsigned int a, debug_id;
	bool zero;
	int r;

	if (!server)
		return;

	debug_id = server->debug_id;
	a = atomic_read(&server->active);
	zero = __refcount_dec_and_test(&server->ref, &r);
	trace_afs_server(debug_id, r - 1, a, reason);
	if (unlikely(zero))
		__afs_put_server(net, server);
}

/*
 * Drop an active count on a server object without updating the last-unused
 * time.
 */
void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
			     enum afs_server_trace reason)
{
	if (server) {
		unsigned int active = atomic_dec_return(&server->active);

		if (active == 0)
			afs_set_server_timer(net, afs_server_gc_delay);
		afs_put_server(net, server, reason);
	}
}

/*
 * Drop an active count on a server object.
 */
void afs_unuse_server(struct afs_net *net, struct afs_server *server,
		      enum afs_server_trace reason)
{
	if (server) {
		server->unuse_time = ktime_get_real_seconds();
		afs_unuse_server_notime(net, server, reason);
	}
}

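/*
 * Destroy a server record after the RCU grace period has expired.
 */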
static void afs_server_rcu(struct rcu_head *rcu)
{
	struct afs_server *server = container_of(rcu, struct afs_server, rcu);

	trace_afs_server(server->debug_id, refcount_read(&server->ref),
			 atomic_read(&server->active), afs_server_trace_free);
	afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
			       afs_estate_trace_put_server);
	kfree(server);
}

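/*
 * Dispose of a server record whose refcount has reached zero.
 */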
static void __afs_put_server(struct afs_net *net, struct afs_server *server)
{
	call_rcu(&server->rcu, afs_server_rcu);
	afs_dec_servers_outstanding(net);
}

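/*
 * Ask the fileserver to discard any callback promises it holds for us.
 */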
static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server)
{
	struct afs_endpoint_state *estate = rcu_access_pointer(server->endpoint_state);
	struct afs_addr_list *alist = estate->addresses;

	afs_fs_give_up_all_callbacks(net, server, &alist->addrs[alist->preferred], NULL);
}

/*
 * destroy a dead server
 */
static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
{
	if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
		afs_give_up_callbacks(net, server);

	afs_put_server(net, server, afs_server_trace_destroy);
}

/*
 * Garbage collect any expired servers.
 */
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
	struct afs_server *server, *next, *prev;
	int active;

	while ((server = gc_list)) {
		gc_list = server->gc_next;

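		/* Recheck the active count under the lock in case the server
		 * got reused after being put on the gc list.
		 */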
		write_seqlock(&net->fs_lock);

		active = atomic_read(&server->active);
		if (active == 0) {
			trace_afs_server(server->debug_id, refcount_read(&server->ref),
					 active, afs_server_trace_gc);
			next = rcu_dereference_protected(
				server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
			prev = server->uuid_prev;
			if (!prev) {
				/* The one at the front is in the tree */
				if (!next) {
					rb_erase(&server->uuid_rb, &net->fs_servers);
				} else {
					rb_replace_node_rcu(&server->uuid_rb,
							    &next->uuid_rb,
							    &net->fs_servers);
					next->uuid_prev = NULL;
				}
			} else {
				/* This server is not at the front */
				rcu_assign_pointer(prev->uuid_next, next);
				if (next)
					next->uuid_prev = prev;
			}

			list_del(&server->probe_link);
			hlist_del_rcu(&server->proc_link);
			if (!hlist_unhashed(&server->addr_link))
				hlist_del_rcu(&server->addr_link);
		}
		write_sequnlock(&net->fs_lock);

		if (active == 0)
			afs_destroy_server(net, server);
	}
}

/*
 * Manage the records of servers known to be within a network namespace. This
 * includes garbage collecting unused servers.
 *
 * Note also that we were given an increment on net->servers_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_servers(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, fs_manager);
	struct afs_server *gc_list = NULL;
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the server list looking for servers that have expired from
	 * lack of use.
	 */
	read_seqlock_excl(&net->fs_lock);

	for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
		struct afs_server *server =
			rb_entry(cursor, struct afs_server, uuid_rb);
		int active = atomic_read(&server->active);

		_debug("manage %pU %u", &server->uuid, active);

		if (purging) {
			trace_afs_server(server->debug_id, refcount_read(&server->ref),
					 active, afs_server_trace_purging);
			if (active != 0)
				pr_notice("Can't purge s=%08x\n", server->debug_id);
		}

		if (active == 0) {
			time64_t expire_at = server->unuse_time;

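			/* Servers that failed VL lookup or weren't found get
			 * expired immediately; the rest get a grace period.
			 */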
			if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
			    !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
				expire_at += afs_server_gc_delay;
			if (purging || expire_at <= now) {
				server->gc_next = gc_list;
				gc_list = server;
			} else if (expire_at < next_manage) {
				next_manage = expire_at;
			}
		}
	}

	read_sequnlock_excl(&net->fs_lock);

	/* Update the timer on the way out. We have to pass an increment on
	 * servers_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->fs_manager))
				afs_inc_servers_outstanding(net);
		} else {
			afs_set_server_timer(net, next_manage - now);
		}
	}

	afs_gc_servers(net, gc_list);

	afs_dec_servers_outstanding(net);
	_leave(" [%d]", atomic_read(&net->servers_outstanding));
}

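/*
 * Queue the server manager, passing it a count on servers_outstanding that it
 * must dispose of; the count is dropped again here if the work item was
 * already queued.
 */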
static void afs_queue_server_manager(struct afs_net *net)
{
	afs_inc_servers_outstanding(net);
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Purge list of servers.
 */
void afs_purge_servers(struct afs_net *net)
{
	_enter("");

	if (del_timer_sync(&net->fs_timer))
		afs_dec_servers_outstanding(net);

	afs_queue_server_manager(net);

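	/* Drop the initial count on servers_outstanding and then wait for
	 * everything outstanding to be cleaned up.
	 */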
615 | _debug("wait" ); |
616 | atomic_dec(v: &net->servers_outstanding); |
617 | wait_var_event(&net->servers_outstanding, |
618 | !atomic_read(&net->servers_outstanding)); |
619 | _leave("" ); |
620 | } |
621 | |
/*
 * Get an update for a server's address list.
 */
static noinline bool afs_update_server_record(struct afs_operation *op,
					      struct afs_server *server,
					      struct key *key)
{
	struct afs_endpoint_state *estate;
	struct afs_addr_list *alist;
	bool has_addrs;

	_enter("");

	trace_afs_server(server->debug_id, refcount_read(&server->ref),
			 atomic_read(&server->active),
			 afs_server_trace_update);

	alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
	if (IS_ERR(alist)) {
		rcu_read_lock();
		estate = rcu_dereference(server->endpoint_state);
		has_addrs = estate->addresses;
		rcu_read_unlock();

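		/* Put up with an interrupted lookup if the operation mustn't
		 * be interrupted and we still have usable addresses.
		 */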
		if ((PTR_ERR(alist) == -ERESTARTSYS ||
		     PTR_ERR(alist) == -EINTR) &&
		    (op->flags & AFS_OPERATION_UNINTR) &&
		    has_addrs) {
			_leave(" = t [intr]");
			return true;
		}
		afs_op_set_error(op, PTR_ERR(alist));
		_leave(" = f [%d]", afs_op_error(op));
		return false;
	}

	if (server->addr_version != alist->version)
		afs_fs_probe_fileserver(op->net, server, alist, key);

	afs_put_addrlist(alist, afs_alist_trace_put_server_update);
	_leave(" = t");
	return true;
}

/*
 * See if a server's address list needs updating.
 */
bool afs_check_server_record(struct afs_operation *op, struct afs_server *server,
			     struct key *key)
{
	bool success;
	int ret, retries = 0;

	_enter("");

	ASSERT(server);

retry:
	if (test_bit(AFS_SERVER_FL_UPDATING, &server->flags))
		goto wait;
	if (test_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags))
		goto update;
	_leave(" = t [good]");
	return true;

update:
	if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
		clear_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
		success = afs_update_server_record(op, server, key);
		clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
		wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
		_leave(" = %d", success);
		return success;
	}

wait:
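	/* Wait for the updating flag to be cleared by whoever owns it, then
	 * go round again to see if the record is now usable.
	 */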
	ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
			  (op->flags & AFS_OPERATION_UNINTR) ?
			  TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
	if (ret == -ERESTARTSYS) {
		afs_op_set_error(op, ret);
		_leave(" = f [intr]");
		return false;
	}

	retries++;
	if (retries == 4) {
		_leave(" = f [stale]");
		ret = -ESTALE;
		return false;
	}
	goto retry;
}
715 | |