1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/net/sunrpc/clnt.c |
4 | * |
5 | * This file contains the high-level RPC interface. |
6 | * It is modeled as a finite state machine to support both synchronous |
7 | * and asynchronous requests. |
8 | * |
9 | * - RPC header generation and argument serialization. |
10 | * - Credential refresh. |
11 | * - TCP connect handling. |
12 | * - Retry of operation when it is suspected the operation failed because |
13 | * of uid squashing on the server, or when the credentials were stale |
14 | * and need to be refreshed, or when a packet was damaged in transit. |
15 | * This may be have to be moved to the VFS layer. |
16 | * |
17 | * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> |
18 | * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> |
19 | */ |
20 | |
21 | |
22 | #include <linux/module.h> |
23 | #include <linux/types.h> |
24 | #include <linux/kallsyms.h> |
25 | #include <linux/mm.h> |
26 | #include <linux/namei.h> |
27 | #include <linux/mount.h> |
28 | #include <linux/slab.h> |
29 | #include <linux/rcupdate.h> |
30 | #include <linux/utsname.h> |
31 | #include <linux/workqueue.h> |
32 | #include <linux/in.h> |
33 | #include <linux/in6.h> |
34 | #include <linux/un.h> |
35 | |
36 | #include <linux/sunrpc/clnt.h> |
37 | #include <linux/sunrpc/addr.h> |
38 | #include <linux/sunrpc/rpc_pipe_fs.h> |
39 | #include <linux/sunrpc/metrics.h> |
40 | #include <linux/sunrpc/bc_xprt.h> |
41 | #include <trace/events/sunrpc.h> |
42 | |
43 | #include "sunrpc.h" |
44 | #include "sysfs.h" |
45 | #include "netns.h" |
46 | |
47 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
48 | # define RPCDBG_FACILITY RPCDBG_CALL |
49 | #endif |
50 | |
51 | /* |
52 | * All RPC clients are linked into this list |
53 | */ |
54 | |
55 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); |
56 | |
57 | |
58 | static void call_start(struct rpc_task *task); |
59 | static void call_reserve(struct rpc_task *task); |
60 | static void call_reserveresult(struct rpc_task *task); |
61 | static void call_allocate(struct rpc_task *task); |
62 | static void call_encode(struct rpc_task *task); |
63 | static void call_decode(struct rpc_task *task); |
64 | static void call_bind(struct rpc_task *task); |
65 | static void call_bind_status(struct rpc_task *task); |
66 | static void call_transmit(struct rpc_task *task); |
67 | static void call_status(struct rpc_task *task); |
68 | static void call_transmit_status(struct rpc_task *task); |
69 | static void call_refresh(struct rpc_task *task); |
70 | static void call_refreshresult(struct rpc_task *task); |
71 | static void call_connect(struct rpc_task *task); |
72 | static void call_connect_status(struct rpc_task *task); |
73 | |
74 | static int rpc_encode_header(struct rpc_task *task, |
75 | struct xdr_stream *xdr); |
76 | static int rpc_decode_header(struct rpc_task *task, |
77 | struct xdr_stream *xdr); |
78 | static int rpc_ping(struct rpc_clnt *clnt); |
79 | static int rpc_ping_noreply(struct rpc_clnt *clnt); |
80 | static void rpc_check_timeout(struct rpc_task *task); |
81 | |
82 | static void rpc_register_client(struct rpc_clnt *clnt) |
83 | { |
84 | struct net *net = rpc_net_ns(clnt); |
85 | struct sunrpc_net *sn = net_generic(net, id: sunrpc_net_id); |
86 | |
87 | spin_lock(lock: &sn->rpc_client_lock); |
88 | list_add(new: &clnt->cl_clients, head: &sn->all_clients); |
89 | spin_unlock(lock: &sn->rpc_client_lock); |
90 | } |
91 | |
92 | static void rpc_unregister_client(struct rpc_clnt *clnt) |
93 | { |
94 | struct net *net = rpc_net_ns(clnt); |
95 | struct sunrpc_net *sn = net_generic(net, id: sunrpc_net_id); |
96 | |
97 | spin_lock(lock: &sn->rpc_client_lock); |
98 | list_del(entry: &clnt->cl_clients); |
99 | spin_unlock(lock: &sn->rpc_client_lock); |
100 | } |
101 | |
/* Tear down the client's rpc_pipefs directory; caller holds the sb ref. */
static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}
106 | |
107 | static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) |
108 | { |
109 | struct net *net = rpc_net_ns(clnt); |
110 | struct super_block *pipefs_sb; |
111 | |
112 | pipefs_sb = rpc_get_sb_net(net); |
113 | if (pipefs_sb) { |
114 | if (pipefs_sb == clnt->pipefs_sb) |
115 | __rpc_clnt_remove_pipedir(clnt); |
116 | rpc_put_sb_net(net); |
117 | } |
118 | } |
119 | |
120 | static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, |
121 | struct rpc_clnt *clnt) |
122 | { |
123 | static uint32_t clntid; |
124 | const char *dir_name = clnt->cl_program->pipe_dir_name; |
125 | char name[15]; |
126 | struct dentry *dir, *dentry; |
127 | |
128 | dir = rpc_d_lookup_sb(sb, dir_name); |
129 | if (dir == NULL) { |
130 | pr_info("RPC: pipefs directory doesn't exist: %s\n" , dir_name); |
131 | return dir; |
132 | } |
133 | for (;;) { |
134 | snprintf(buf: name, size: sizeof(name), fmt: "clnt%x" , (unsigned int)clntid++); |
135 | name[sizeof(name) - 1] = '\0'; |
136 | dentry = rpc_create_client_dir(dir, name, clnt); |
137 | if (!IS_ERR(ptr: dentry)) |
138 | break; |
139 | if (dentry == ERR_PTR(error: -EEXIST)) |
140 | continue; |
141 | printk(KERN_INFO "RPC: Couldn't create pipefs entry" |
142 | " %s/%s, error %ld\n" , |
143 | dir_name, name, PTR_ERR(dentry)); |
144 | break; |
145 | } |
146 | dput(dir); |
147 | return dentry; |
148 | } |
149 | |
150 | static int |
151 | rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt) |
152 | { |
153 | struct dentry *dentry; |
154 | |
155 | clnt->pipefs_sb = pipefs_sb; |
156 | |
157 | if (clnt->cl_program->pipe_dir_name != NULL) { |
158 | dentry = rpc_setup_pipedir_sb(sb: pipefs_sb, clnt); |
159 | if (IS_ERR(ptr: dentry)) |
160 | return PTR_ERR(ptr: dentry); |
161 | } |
162 | return 0; |
163 | } |
164 | |
165 | static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) |
166 | { |
167 | if (clnt->cl_program->pipe_dir_name == NULL) |
168 | return 1; |
169 | |
170 | switch (event) { |
171 | case RPC_PIPEFS_MOUNT: |
172 | if (clnt->cl_pipedir_objects.pdh_dentry != NULL) |
173 | return 1; |
174 | if (refcount_read(r: &clnt->cl_count) == 0) |
175 | return 1; |
176 | break; |
177 | case RPC_PIPEFS_UMOUNT: |
178 | if (clnt->cl_pipedir_objects.pdh_dentry == NULL) |
179 | return 1; |
180 | break; |
181 | } |
182 | return 0; |
183 | } |
184 | |
185 | static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, |
186 | struct super_block *sb) |
187 | { |
188 | struct dentry *dentry; |
189 | |
190 | switch (event) { |
191 | case RPC_PIPEFS_MOUNT: |
192 | dentry = rpc_setup_pipedir_sb(sb, clnt); |
193 | if (!dentry) |
194 | return -ENOENT; |
195 | if (IS_ERR(ptr: dentry)) |
196 | return PTR_ERR(ptr: dentry); |
197 | break; |
198 | case RPC_PIPEFS_UMOUNT: |
199 | __rpc_clnt_remove_pipedir(clnt); |
200 | break; |
201 | default: |
202 | printk(KERN_ERR "%s: unknown event: %ld\n" , __func__, event); |
203 | return -ENOTSUPP; |
204 | } |
205 | return 0; |
206 | } |
207 | |
208 | static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, |
209 | struct super_block *sb) |
210 | { |
211 | int error = 0; |
212 | |
213 | for (;; clnt = clnt->cl_parent) { |
214 | if (!rpc_clnt_skip_event(clnt, event)) |
215 | error = __rpc_clnt_handle_event(clnt, event, sb); |
216 | if (error || clnt == clnt->cl_parent) |
217 | break; |
218 | } |
219 | return error; |
220 | } |
221 | |
222 | static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) |
223 | { |
224 | struct sunrpc_net *sn = net_generic(net, id: sunrpc_net_id); |
225 | struct rpc_clnt *clnt; |
226 | |
227 | spin_lock(lock: &sn->rpc_client_lock); |
228 | list_for_each_entry(clnt, &sn->all_clients, cl_clients) { |
229 | if (rpc_clnt_skip_event(clnt, event)) |
230 | continue; |
231 | spin_unlock(lock: &sn->rpc_client_lock); |
232 | return clnt; |
233 | } |
234 | spin_unlock(lock: &sn->rpc_client_lock); |
235 | return NULL; |
236 | } |
237 | |
238 | static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, |
239 | void *ptr) |
240 | { |
241 | struct super_block *sb = ptr; |
242 | struct rpc_clnt *clnt; |
243 | int error = 0; |
244 | |
245 | while ((clnt = rpc_get_client_for_event(net: sb->s_fs_info, event))) { |
246 | error = __rpc_pipefs_event(clnt, event, sb); |
247 | if (error) |
248 | break; |
249 | } |
250 | return error; |
251 | } |
252 | |
/* Receives rpc_pipefs mount/umount notifications for all RPC clients. */
static struct notifier_block rpc_clients_block = {
	.notifier_call = rpc_pipefs_event,
	.priority = SUNRPC_PIPEFS_RPC_PRIO,
};
257 | |
258 | int rpc_clients_notifier_register(void) |
259 | { |
260 | return rpc_pipefs_notifier_register(&rpc_clients_block); |
261 | } |
262 | |
263 | void rpc_clients_notifier_unregister(void) |
264 | { |
265 | return rpc_pipefs_notifier_unregister(&rpc_clients_block); |
266 | } |
267 | |
268 | static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, |
269 | struct rpc_xprt *xprt, |
270 | const struct rpc_timeout *timeout) |
271 | { |
272 | struct rpc_xprt *old; |
273 | |
274 | spin_lock(lock: &clnt->cl_lock); |
275 | old = rcu_dereference_protected(clnt->cl_xprt, |
276 | lockdep_is_held(&clnt->cl_lock)); |
277 | |
278 | if (!xprt_bound(xprt)) |
279 | clnt->cl_autobind = 1; |
280 | |
281 | clnt->cl_timeout = timeout; |
282 | rcu_assign_pointer(clnt->cl_xprt, xprt); |
283 | spin_unlock(lock: &clnt->cl_lock); |
284 | |
285 | return old; |
286 | } |
287 | |
288 | static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) |
289 | { |
290 | ssize_t copied; |
291 | |
292 | copied = strscpy(clnt->cl_nodename, |
293 | nodename, sizeof(clnt->cl_nodename)); |
294 | |
295 | clnt->cl_nodelen = copied < 0 |
296 | ? sizeof(clnt->cl_nodename) - 1 |
297 | : copied; |
298 | } |
299 | |
/*
 * Register a newly built client: debugfs entry, pipefs directory (if
 * rpc_pipefs is mounted), per-netns client list, and finally the auth
 * flavour.  On failure everything already set up is torn down again.
 */
static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	/* Pin the pipefs superblock while creating the client dir. */
	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(ptr: auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n" ,
				pseudoflavor);
		err = PTR_ERR(ptr: auth);
		goto err_auth;
	}
	return 0;
err_auth:
	/* Re-take the sb reference so the pipedir removal is safe. */
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}
345 | |
/* Allocator for unique client ids (cl_clid). */
static DEFINE_IDA(rpc_clids);
347 | |
348 | void rpc_cleanup_clids(void) |
349 | { |
350 | ida_destroy(ida: &rpc_clids); |
351 | } |
352 | |
353 | static int rpc_alloc_clid(struct rpc_clnt *clnt) |
354 | { |
355 | int clid; |
356 | |
357 | clid = ida_alloc(ida: &rpc_clids, GFP_KERNEL); |
358 | if (clid < 0) |
359 | return clid; |
360 | clnt->cl_clid = clid; |
361 | return 0; |
362 | } |
363 | |
364 | static void rpc_free_clid(struct rpc_clnt *clnt) |
365 | { |
366 | ida_free(&rpc_clids, id: clnt->cl_clid); |
367 | } |
368 | |
/*
 * Allocate and initialize a struct rpc_clnt bound to @xprt/@xps.
 * Consumes the caller's references on @xps and @xprt in all cases
 * (on success via xprt_switch_put()/cl_xprt, on failure explicitly).
 * Returns the new client or an ERR_PTR; on success a child client
 * holds a reference on @parent.
 */
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	/* Validate the requested program version before allocating. */
	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(size: sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	/* A root client is its own parent. */
	clnt->cl_parent = parent ? : clnt;
	clnt->cl_xprtsec = args->xprtsec;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(cred: args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = args->stats ? : program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(pdh: &clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(list: &clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	/* Caller-supplied timeouts override the transport's defaults. */
	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt->main = true;
	xprt_iter_init(xpi: &clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(rt: &clnt->cl_rtt_default, timeo: clnt->cl_timeout->to_initval);

	refcount_set(r: &clnt->cl_count, n: 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	rpc_sysfs_client_setup(clnt, xprt_switch: xps, net: rpc_net_ns(clnt));
	err = rpc_client_register(clnt, pseudoflavor: args->authflavor, client_name: args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		refcount_inc(r: &parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, args);
	return clnt;

	/* Unwind in reverse order of construction. */
out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(cred: clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(objp: clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program: program->name, server: args->servername, error: err);
	return ERR_PTR(error: err);
}
465 | |
/*
 * Build a client on top of @xprt: pick or allocate a transport
 * switch (re-using the backchannel's switch when present), create
 * the client, optionally ping the server, then apply the behaviour
 * flags from @args.  Consumes the caller's @xprt reference.
 */
static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(error: -ENOMEM);
		}
		/* Share the new switch with the backchannel, if any. */
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(ptr: clnt))
		return clnt;

	/* Probe the server unless the caller opted out. */
	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(error: err);
		}
	} else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
		int err = rpc_ping_noreply(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(error: err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
523 | |
524 | /** |
525 | * rpc_create - create an RPC client and transport with one call |
526 | * @args: rpc_clnt create argument structure |
527 | * |
528 | * Creates and initializes an RPC transport and an RPC client. |
529 | * |
530 | * It can ping the server in order to determine if it is up, and to see if |
531 | * it supports this program and version. RPC_CLNT_CREATE_NOPING disables |
532 | * this behavior so asynchronous tasks can also use rpc_create. |
533 | */ |
534 | struct rpc_clnt *rpc_create(struct rpc_create_args *args) |
535 | { |
536 | struct rpc_xprt *xprt; |
537 | struct xprt_create xprtargs = { |
538 | .net = args->net, |
539 | .ident = args->protocol, |
540 | .srcaddr = args->saddress, |
541 | .dstaddr = args->address, |
542 | .addrlen = args->addrsize, |
543 | .servername = args->servername, |
544 | .bc_xprt = args->bc_xprt, |
545 | .xprtsec = args->xprtsec, |
546 | .connect_timeout = args->connect_timeout, |
547 | .reconnect_timeout = args->reconnect_timeout, |
548 | }; |
549 | char servername[48]; |
550 | struct rpc_clnt *clnt; |
551 | int i; |
552 | |
553 | if (args->bc_xprt) { |
554 | WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); |
555 | xprt = args->bc_xprt->xpt_bc_xprt; |
556 | if (xprt) { |
557 | xprt_get(xprt); |
558 | return rpc_create_xprt(args, xprt); |
559 | } |
560 | } |
561 | |
562 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) |
563 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; |
564 | if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) |
565 | xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT; |
566 | /* |
567 | * If the caller chooses not to specify a hostname, whip |
568 | * up a string representation of the passed-in address. |
569 | */ |
570 | if (xprtargs.servername == NULL) { |
571 | struct sockaddr_un *sun = |
572 | (struct sockaddr_un *)args->address; |
573 | struct sockaddr_in *sin = |
574 | (struct sockaddr_in *)args->address; |
575 | struct sockaddr_in6 *sin6 = |
576 | (struct sockaddr_in6 *)args->address; |
577 | |
578 | servername[0] = '\0'; |
579 | switch (args->address->sa_family) { |
580 | case AF_LOCAL: |
581 | if (sun->sun_path[0]) |
582 | snprintf(buf: servername, size: sizeof(servername), fmt: "%s" , |
583 | sun->sun_path); |
584 | else |
585 | snprintf(buf: servername, size: sizeof(servername), fmt: "@%s" , |
586 | sun->sun_path+1); |
587 | break; |
588 | case AF_INET: |
589 | snprintf(buf: servername, size: sizeof(servername), fmt: "%pI4" , |
590 | &sin->sin_addr.s_addr); |
591 | break; |
592 | case AF_INET6: |
593 | snprintf(buf: servername, size: sizeof(servername), fmt: "%pI6" , |
594 | &sin6->sin6_addr); |
595 | break; |
596 | default: |
597 | /* caller wants default server name, but |
598 | * address family isn't recognized. */ |
599 | return ERR_PTR(error: -EINVAL); |
600 | } |
601 | xprtargs.servername = servername; |
602 | } |
603 | |
604 | xprt = xprt_create_transport(args: &xprtargs); |
605 | if (IS_ERR(ptr: xprt)) |
606 | return (struct rpc_clnt *)xprt; |
607 | |
608 | /* |
609 | * By default, kernel RPC client connects from a reserved port. |
610 | * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, |
611 | * but it is always enabled for rpciod, which handles the connect |
612 | * operation. |
613 | */ |
614 | xprt->resvport = 1; |
615 | if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) |
616 | xprt->resvport = 0; |
617 | xprt->reuseport = 0; |
618 | if (args->flags & RPC_CLNT_CREATE_REUSEPORT) |
619 | xprt->reuseport = 1; |
620 | |
621 | clnt = rpc_create_xprt(args, xprt); |
622 | if (IS_ERR(ptr: clnt) || args->nconnect <= 1) |
623 | return clnt; |
624 | |
625 | for (i = 0; i < args->nconnect - 1; i++) { |
626 | if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0) |
627 | break; |
628 | } |
629 | return clnt; |
630 | } |
631 | EXPORT_SYMBOL_GPL(rpc_create); |
632 | |
633 | /* |
634 | * This function clones the RPC client structure. It allows us to share the |
635 | * same transport while varying parameters such as the authentication |
636 | * flavour. |
637 | */ |
638 | static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, |
639 | struct rpc_clnt *clnt) |
640 | { |
641 | struct rpc_xprt_switch *xps; |
642 | struct rpc_xprt *xprt; |
643 | struct rpc_clnt *new; |
644 | int err; |
645 | |
646 | err = -ENOMEM; |
647 | rcu_read_lock(); |
648 | xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); |
649 | xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); |
650 | rcu_read_unlock(); |
651 | if (xprt == NULL || xps == NULL) { |
652 | xprt_put(xprt); |
653 | xprt_switch_put(xps); |
654 | goto out_err; |
655 | } |
656 | args->servername = xprt->servername; |
657 | args->nodename = clnt->cl_nodename; |
658 | |
659 | new = rpc_new_client(args, xps, xprt, parent: clnt); |
660 | if (IS_ERR(ptr: new)) |
661 | return new; |
662 | |
663 | /* Turn off autobind on clones */ |
664 | new->cl_autobind = 0; |
665 | new->cl_softrtry = clnt->cl_softrtry; |
666 | new->cl_softerr = clnt->cl_softerr; |
667 | new->cl_noretranstimeo = clnt->cl_noretranstimeo; |
668 | new->cl_discrtry = clnt->cl_discrtry; |
669 | new->cl_chatty = clnt->cl_chatty; |
670 | new->cl_principal = clnt->cl_principal; |
671 | new->cl_max_connect = clnt->cl_max_connect; |
672 | return new; |
673 | |
674 | out_err: |
675 | trace_rpc_clnt_clone_err(clnt, error: err); |
676 | return ERR_PTR(error: err); |
677 | } |
678 | |
679 | /** |
680 | * rpc_clone_client - Clone an RPC client structure |
681 | * |
682 | * @clnt: RPC client whose parameters are copied |
683 | * |
684 | * Returns a fresh RPC client or an ERR_PTR. |
685 | */ |
686 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) |
687 | { |
688 | struct rpc_create_args args = { |
689 | .program = clnt->cl_program, |
690 | .prognumber = clnt->cl_prog, |
691 | .version = clnt->cl_vers, |
692 | .authflavor = clnt->cl_auth->au_flavor, |
693 | .cred = clnt->cl_cred, |
694 | .stats = clnt->cl_stats, |
695 | }; |
696 | return __rpc_clone_client(args: &args, clnt); |
697 | } |
698 | EXPORT_SYMBOL_GPL(rpc_clone_client); |
699 | |
700 | /** |
701 | * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth |
702 | * |
703 | * @clnt: RPC client whose parameters are copied |
704 | * @flavor: security flavor for new client |
705 | * |
706 | * Returns a fresh RPC client or an ERR_PTR. |
707 | */ |
708 | struct rpc_clnt * |
709 | rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) |
710 | { |
711 | struct rpc_create_args args = { |
712 | .program = clnt->cl_program, |
713 | .prognumber = clnt->cl_prog, |
714 | .version = clnt->cl_vers, |
715 | .authflavor = flavor, |
716 | .cred = clnt->cl_cred, |
717 | .stats = clnt->cl_stats, |
718 | }; |
719 | return __rpc_clone_client(args: &args, clnt); |
720 | } |
721 | EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); |
722 | |
723 | /** |
724 | * rpc_switch_client_transport: switch the RPC transport on the fly |
725 | * @clnt: pointer to a struct rpc_clnt |
726 | * @args: pointer to the new transport arguments |
727 | * @timeout: pointer to the new timeout parameters |
728 | * |
729 | * This function allows the caller to switch the RPC transport for the |
730 | * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS |
731 | * server, for instance. It assumes that the caller has ensured that |
732 | * there are no active RPC tasks by using some form of locking. |
733 | * |
734 | * Returns zero if "clnt" is now using the new xprt. Otherwise a |
735 | * negative errno is returned, and "clnt" continues to use the old |
736 | * xprt. |
737 | */ |
738 | int rpc_switch_client_transport(struct rpc_clnt *clnt, |
739 | struct xprt_create *args, |
740 | const struct rpc_timeout *timeout) |
741 | { |
742 | const struct rpc_timeout *old_timeo; |
743 | rpc_authflavor_t pseudoflavor; |
744 | struct rpc_xprt_switch *xps, *oldxps; |
745 | struct rpc_xprt *xprt, *old; |
746 | struct rpc_clnt *parent; |
747 | int err; |
748 | |
749 | args->xprtsec = clnt->cl_xprtsec; |
750 | xprt = xprt_create_transport(args); |
751 | if (IS_ERR(ptr: xprt)) |
752 | return PTR_ERR(ptr: xprt); |
753 | |
754 | xps = xprt_switch_alloc(xprt, GFP_KERNEL); |
755 | if (xps == NULL) { |
756 | xprt_put(xprt); |
757 | return -ENOMEM; |
758 | } |
759 | |
760 | pseudoflavor = clnt->cl_auth->au_flavor; |
761 | |
762 | old_timeo = clnt->cl_timeout; |
763 | old = rpc_clnt_set_transport(clnt, xprt, timeout); |
764 | oldxps = xprt_iter_xchg_switch(xpi: &clnt->cl_xpi, newswitch: xps); |
765 | |
766 | rpc_unregister_client(clnt); |
767 | __rpc_clnt_remove_pipedir(clnt); |
768 | rpc_sysfs_client_destroy(clnt); |
769 | rpc_clnt_debugfs_unregister(clnt); |
770 | |
771 | /* |
772 | * A new transport was created. "clnt" therefore |
773 | * becomes the root of a new cl_parent tree. clnt's |
774 | * children, if it has any, still point to the old xprt. |
775 | */ |
776 | parent = clnt->cl_parent; |
777 | clnt->cl_parent = clnt; |
778 | |
779 | /* |
780 | * The old rpc_auth cache cannot be re-used. GSS |
781 | * contexts in particular are between a single |
782 | * client and server. |
783 | */ |
784 | err = rpc_client_register(clnt, pseudoflavor, NULL); |
785 | if (err) |
786 | goto out_revert; |
787 | |
788 | synchronize_rcu(); |
789 | if (parent != clnt) |
790 | rpc_release_client(parent); |
791 | xprt_switch_put(xps: oldxps); |
792 | xprt_put(xprt: old); |
793 | trace_rpc_clnt_replace_xprt(clnt); |
794 | return 0; |
795 | |
796 | out_revert: |
797 | xps = xprt_iter_xchg_switch(xpi: &clnt->cl_xpi, newswitch: oldxps); |
798 | rpc_clnt_set_transport(clnt, xprt: old, timeout: old_timeo); |
799 | clnt->cl_parent = parent; |
800 | rpc_client_register(clnt, pseudoflavor, NULL); |
801 | xprt_switch_put(xps); |
802 | xprt_put(xprt); |
803 | trace_rpc_clnt_replace_xprt_err(clnt); |
804 | return err; |
805 | } |
806 | EXPORT_SYMBOL_GPL(rpc_switch_client_transport); |
807 | |
808 | static struct rpc_xprt_switch *rpc_clnt_xprt_switch_get(struct rpc_clnt *clnt) |
809 | { |
810 | struct rpc_xprt_switch *xps; |
811 | |
812 | rcu_read_lock(); |
813 | xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); |
814 | rcu_read_unlock(); |
815 | |
816 | return xps; |
817 | } |
818 | |
819 | static |
820 | int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi, |
821 | void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps)) |
822 | { |
823 | struct rpc_xprt_switch *xps; |
824 | |
825 | xps = rpc_clnt_xprt_switch_get(clnt); |
826 | if (xps == NULL) |
827 | return -EAGAIN; |
828 | func(xpi, xps); |
829 | xprt_switch_put(xps); |
830 | return 0; |
831 | } |
832 | |
833 | static |
834 | int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) |
835 | { |
836 | return _rpc_clnt_xprt_iter_init(clnt, xpi, func: xprt_iter_init_listall); |
837 | } |
838 | |
839 | static |
840 | int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt, |
841 | struct rpc_xprt_iter *xpi) |
842 | { |
843 | return _rpc_clnt_xprt_iter_init(clnt, xpi, func: xprt_iter_init_listoffline); |
844 | } |
845 | |
846 | /** |
847 | * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports |
848 | * @clnt: pointer to client |
849 | * @fn: function to apply |
850 | * @data: void pointer to function data |
851 | * |
852 | * Iterates through the list of RPC transports currently attached to the |
853 | * client and applies the function fn(clnt, xprt, data). |
854 | * |
855 | * On error, the iteration stops, and the function returns the error value. |
856 | */ |
857 | int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, |
858 | int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), |
859 | void *data) |
860 | { |
861 | struct rpc_xprt_iter xpi; |
862 | int ret; |
863 | |
864 | ret = rpc_clnt_xprt_iter_init(clnt, xpi: &xpi); |
865 | if (ret) |
866 | return ret; |
867 | for (;;) { |
868 | struct rpc_xprt *xprt = xprt_iter_get_next(xpi: &xpi); |
869 | |
870 | if (!xprt) |
871 | break; |
872 | ret = fn(clnt, xprt, data); |
873 | xprt_put(xprt); |
874 | if (ret < 0) |
875 | break; |
876 | } |
877 | xprt_iter_destroy(xpi: &xpi); |
878 | return ret; |
879 | } |
880 | EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt); |
881 | |
882 | /* |
883 | * Kill all tasks for the given client. |
884 | * XXX: kill their descendants as well? |
885 | */ |
886 | void rpc_killall_tasks(struct rpc_clnt *clnt) |
887 | { |
888 | struct rpc_task *rovr; |
889 | |
890 | |
891 | if (list_empty(head: &clnt->cl_tasks)) |
892 | return; |
893 | |
894 | /* |
895 | * Spin lock all_tasks to prevent changes... |
896 | */ |
897 | trace_rpc_clnt_killall(clnt); |
898 | spin_lock(lock: &clnt->cl_lock); |
899 | list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) |
900 | rpc_signal_task(rovr); |
901 | spin_unlock(lock: &clnt->cl_lock); |
902 | } |
903 | EXPORT_SYMBOL_GPL(rpc_killall_tasks); |
904 | |
905 | /** |
906 | * rpc_cancel_tasks - try to cancel a set of RPC tasks |
907 | * @clnt: Pointer to RPC client |
908 | * @error: RPC task error value to set |
909 | * @fnmatch: Pointer to selector function |
910 | * @data: User data |
911 | * |
912 | * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. |
913 | * The argument @error must be a negative error value. |
914 | */ |
915 | unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, |
916 | bool (*fnmatch)(const struct rpc_task *, |
917 | const void *), |
918 | const void *data) |
919 | { |
920 | struct rpc_task *task; |
921 | unsigned long count = 0; |
922 | |
923 | if (list_empty(head: &clnt->cl_tasks)) |
924 | return 0; |
925 | /* |
926 | * Spin lock all_tasks to prevent changes... |
927 | */ |
928 | spin_lock(lock: &clnt->cl_lock); |
929 | list_for_each_entry(task, &clnt->cl_tasks, tk_task) { |
930 | if (!RPC_IS_ACTIVATED(task)) |
931 | continue; |
932 | if (!fnmatch(task, data)) |
933 | continue; |
934 | rpc_task_try_cancel(task, error); |
935 | count++; |
936 | } |
937 | spin_unlock(lock: &clnt->cl_lock); |
938 | return count; |
939 | } |
940 | EXPORT_SYMBOL_GPL(rpc_cancel_tasks); |
941 | |
/* Per-transport callback: force a disconnect if currently connected. */
static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt, void *dummy)
{
	if (xprt_connected(xprt))
		xprt_force_disconnect(xprt);
	return 0;
}
949 | |
950 | void rpc_clnt_disconnect(struct rpc_clnt *clnt) |
951 | { |
952 | rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL); |
953 | } |
954 | EXPORT_SYMBOL_GPL(rpc_clnt_disconnect); |
955 | |
956 | /* |
957 | * Properly shut down an RPC client, terminating all outstanding |
958 | * requests. |
959 | */ |
960 | void rpc_shutdown_client(struct rpc_clnt *clnt) |
961 | { |
962 | might_sleep(); |
963 | |
964 | trace_rpc_clnt_shutdown(clnt); |
965 | |
966 | while (!list_empty(head: &clnt->cl_tasks)) { |
967 | rpc_killall_tasks(clnt); |
968 | wait_event_timeout(destroy_wait, |
969 | list_empty(&clnt->cl_tasks), 1*HZ); |
970 | } |
971 | |
972 | rpc_release_client(clnt); |
973 | } |
974 | EXPORT_SYMBOL_GPL(rpc_shutdown_client); |
975 | |
976 | /* |
977 | * Free an RPC client |
978 | */ |
979 | static void rpc_free_client_work(struct work_struct *work) |
980 | { |
981 | struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work); |
982 | |
983 | trace_rpc_clnt_free(clnt); |
984 | |
985 | /* These might block on processes that might allocate memory, |
986 | * so they cannot be called in rpciod, so they are handled separately |
987 | * here. |
988 | */ |
989 | rpc_sysfs_client_destroy(clnt); |
990 | rpc_clnt_debugfs_unregister(clnt); |
991 | rpc_free_clid(clnt); |
992 | rpc_clnt_remove_pipedir(clnt); |
993 | xprt_put(rcu_dereference_raw(clnt->cl_xprt)); |
994 | |
995 | kfree(objp: clnt); |
996 | rpciod_down(); |
997 | } |
998 | static struct rpc_clnt * |
999 | rpc_free_client(struct rpc_clnt *clnt) |
1000 | { |
1001 | struct rpc_clnt *parent = NULL; |
1002 | |
1003 | trace_rpc_clnt_release(clnt); |
1004 | if (clnt->cl_parent != clnt) |
1005 | parent = clnt->cl_parent; |
1006 | rpc_unregister_client(clnt); |
1007 | rpc_free_iostats(clnt->cl_metrics); |
1008 | clnt->cl_metrics = NULL; |
1009 | xprt_iter_destroy(xpi: &clnt->cl_xpi); |
1010 | put_cred(cred: clnt->cl_cred); |
1011 | |
1012 | INIT_WORK(&clnt->cl_work, rpc_free_client_work); |
1013 | schedule_work(work: &clnt->cl_work); |
1014 | return parent; |
1015 | } |
1016 | |
1017 | /* |
1018 | * Free an RPC client |
1019 | */ |
1020 | static struct rpc_clnt * |
1021 | rpc_free_auth(struct rpc_clnt *clnt) |
1022 | { |
1023 | /* |
1024 | * Note: RPCSEC_GSS may need to send NULL RPC calls in order to |
1025 | * release remaining GSS contexts. This mechanism ensures |
1026 | * that it can do so safely. |
1027 | */ |
1028 | if (clnt->cl_auth != NULL) { |
1029 | rpcauth_release(clnt->cl_auth); |
1030 | clnt->cl_auth = NULL; |
1031 | } |
1032 | if (refcount_dec_and_test(r: &clnt->cl_count)) |
1033 | return rpc_free_client(clnt); |
1034 | return NULL; |
1035 | } |
1036 | |
1037 | /* |
1038 | * Release reference to the RPC client |
1039 | */ |
1040 | void |
1041 | rpc_release_client(struct rpc_clnt *clnt) |
1042 | { |
1043 | do { |
1044 | if (list_empty(head: &clnt->cl_tasks)) |
1045 | wake_up(&destroy_wait); |
1046 | if (refcount_dec_not_one(r: &clnt->cl_count)) |
1047 | break; |
1048 | clnt = rpc_free_auth(clnt); |
1049 | } while (clnt != NULL); |
1050 | } |
1051 | EXPORT_SYMBOL_GPL(rpc_release_client); |
1052 | |
1053 | /** |
1054 | * rpc_bind_new_program - bind a new RPC program to an existing client |
1055 | * @old: old rpc_client |
1056 | * @program: rpc program to set |
1057 | * @vers: rpc program version |
1058 | * |
1059 | * Clones the rpc client and sets up a new RPC program. This is mainly |
1060 | * of use for enabling different RPC programs to share the same transport. |
1061 | * The Sun NFSv2/v3 ACL protocol can do this. |
1062 | */ |
1063 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, |
1064 | const struct rpc_program *program, |
1065 | u32 vers) |
1066 | { |
1067 | struct rpc_create_args args = { |
1068 | .program = program, |
1069 | .prognumber = program->number, |
1070 | .version = vers, |
1071 | .authflavor = old->cl_auth->au_flavor, |
1072 | .cred = old->cl_cred, |
1073 | .stats = old->cl_stats, |
1074 | }; |
1075 | struct rpc_clnt *clnt; |
1076 | int err; |
1077 | |
1078 | clnt = __rpc_clone_client(args: &args, clnt: old); |
1079 | if (IS_ERR(ptr: clnt)) |
1080 | goto out; |
1081 | err = rpc_ping(clnt); |
1082 | if (err != 0) { |
1083 | rpc_shutdown_client(clnt); |
1084 | clnt = ERR_PTR(error: err); |
1085 | } |
1086 | out: |
1087 | return clnt; |
1088 | } |
1089 | EXPORT_SYMBOL_GPL(rpc_bind_new_program); |
1090 | |
1091 | struct rpc_xprt * |
1092 | rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) |
1093 | { |
1094 | struct rpc_xprt_switch *xps; |
1095 | |
1096 | if (!xprt) |
1097 | return NULL; |
1098 | rcu_read_lock(); |
1099 | xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); |
1100 | atomic_long_inc(v: &xps->xps_queuelen); |
1101 | rcu_read_unlock(); |
1102 | atomic_long_inc(v: &xprt->queuelen); |
1103 | |
1104 | return xprt; |
1105 | } |
1106 | |
1107 | static void |
1108 | rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) |
1109 | { |
1110 | struct rpc_xprt_switch *xps; |
1111 | |
1112 | atomic_long_dec(v: &xprt->queuelen); |
1113 | rcu_read_lock(); |
1114 | xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); |
1115 | atomic_long_dec(v: &xps->xps_queuelen); |
1116 | rcu_read_unlock(); |
1117 | |
1118 | xprt_put(xprt); |
1119 | } |
1120 | |
1121 | void rpc_task_release_transport(struct rpc_task *task) |
1122 | { |
1123 | struct rpc_xprt *xprt = task->tk_xprt; |
1124 | |
1125 | if (xprt) { |
1126 | task->tk_xprt = NULL; |
1127 | if (task->tk_client) |
1128 | rpc_task_release_xprt(clnt: task->tk_client, xprt); |
1129 | else |
1130 | xprt_put(xprt); |
1131 | } |
1132 | } |
1133 | EXPORT_SYMBOL_GPL(rpc_task_release_transport); |
1134 | |
1135 | void rpc_task_release_client(struct rpc_task *task) |
1136 | { |
1137 | struct rpc_clnt *clnt = task->tk_client; |
1138 | |
1139 | rpc_task_release_transport(task); |
1140 | if (clnt != NULL) { |
1141 | /* Remove from client task list */ |
1142 | spin_lock(lock: &clnt->cl_lock); |
1143 | list_del(entry: &task->tk_task); |
1144 | spin_unlock(lock: &clnt->cl_lock); |
1145 | task->tk_client = NULL; |
1146 | |
1147 | rpc_release_client(clnt); |
1148 | } |
1149 | } |
1150 | |
1151 | static struct rpc_xprt * |
1152 | rpc_task_get_first_xprt(struct rpc_clnt *clnt) |
1153 | { |
1154 | struct rpc_xprt *xprt; |
1155 | |
1156 | rcu_read_lock(); |
1157 | xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); |
1158 | rcu_read_unlock(); |
1159 | return rpc_task_get_xprt(clnt, xprt); |
1160 | } |
1161 | |
1162 | static struct rpc_xprt * |
1163 | rpc_task_get_next_xprt(struct rpc_clnt *clnt) |
1164 | { |
1165 | return rpc_task_get_xprt(clnt, xprt: xprt_iter_get_next(xpi: &clnt->cl_xpi)); |
1166 | } |
1167 | |
1168 | static |
1169 | void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) |
1170 | { |
1171 | if (task->tk_xprt) { |
1172 | if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && |
1173 | (task->tk_flags & RPC_TASK_MOVEABLE))) |
1174 | return; |
1175 | xprt_release(task); |
1176 | xprt_put(xprt: task->tk_xprt); |
1177 | } |
1178 | if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) |
1179 | task->tk_xprt = rpc_task_get_first_xprt(clnt); |
1180 | else |
1181 | task->tk_xprt = rpc_task_get_next_xprt(clnt); |
1182 | } |
1183 | |
1184 | static |
1185 | void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) |
1186 | { |
1187 | rpc_task_set_transport(task, clnt); |
1188 | task->tk_client = clnt; |
1189 | refcount_inc(r: &clnt->cl_count); |
1190 | if (clnt->cl_softrtry) |
1191 | task->tk_flags |= RPC_TASK_SOFT; |
1192 | if (clnt->cl_softerr) |
1193 | task->tk_flags |= RPC_TASK_TIMEOUT; |
1194 | if (clnt->cl_noretranstimeo) |
1195 | task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; |
1196 | /* Add to the client's list of all tasks */ |
1197 | spin_lock(lock: &clnt->cl_lock); |
1198 | list_add_tail(new: &task->tk_task, head: &clnt->cl_tasks); |
1199 | spin_unlock(lock: &clnt->cl_lock); |
1200 | } |
1201 | |
1202 | static void |
1203 | rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) |
1204 | { |
1205 | if (msg != NULL) { |
1206 | task->tk_msg.rpc_proc = msg->rpc_proc; |
1207 | task->tk_msg.rpc_argp = msg->rpc_argp; |
1208 | task->tk_msg.rpc_resp = msg->rpc_resp; |
1209 | task->tk_msg.rpc_cred = msg->rpc_cred; |
1210 | if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) |
1211 | get_cred(cred: task->tk_msg.rpc_cred); |
1212 | } |
1213 | } |
1214 | |
1215 | /* |
1216 | * Default callback for async RPC calls |
1217 | */ |
1218 | static void |
1219 | rpc_default_callback(struct rpc_task *task, void *data) |
1220 | { |
1221 | } |
1222 | |
1223 | static const struct rpc_call_ops rpc_default_ops = { |
1224 | .rpc_call_done = rpc_default_callback, |
1225 | }; |
1226 | |
1227 | /** |
1228 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it |
1229 | * @task_setup_data: pointer to task initialisation data |
1230 | */ |
1231 | struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) |
1232 | { |
1233 | struct rpc_task *task; |
1234 | |
1235 | task = rpc_new_task(task_setup_data); |
1236 | if (IS_ERR(ptr: task)) |
1237 | return task; |
1238 | |
1239 | if (!RPC_IS_ASYNC(task)) |
1240 | task->tk_flags |= RPC_TASK_CRED_NOREF; |
1241 | |
1242 | rpc_task_set_client(task, clnt: task_setup_data->rpc_client); |
1243 | rpc_task_set_rpc_message(task, msg: task_setup_data->rpc_message); |
1244 | |
1245 | if (task->tk_action == NULL) |
1246 | rpc_call_start(task); |
1247 | |
1248 | atomic_inc(v: &task->tk_count); |
1249 | rpc_execute(task); |
1250 | return task; |
1251 | } |
1252 | EXPORT_SYMBOL_GPL(rpc_run_task); |
1253 | |
1254 | /** |
1255 | * rpc_call_sync - Perform a synchronous RPC call |
1256 | * @clnt: pointer to RPC client |
1257 | * @msg: RPC call parameters |
1258 | * @flags: RPC call flags |
1259 | */ |
1260 | int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) |
1261 | { |
1262 | struct rpc_task *task; |
1263 | struct rpc_task_setup task_setup_data = { |
1264 | .rpc_client = clnt, |
1265 | .rpc_message = msg, |
1266 | .callback_ops = &rpc_default_ops, |
1267 | .flags = flags, |
1268 | }; |
1269 | int status; |
1270 | |
1271 | WARN_ON_ONCE(flags & RPC_TASK_ASYNC); |
1272 | if (flags & RPC_TASK_ASYNC) { |
1273 | rpc_release_calldata(task_setup_data.callback_ops, |
1274 | task_setup_data.callback_data); |
1275 | return -EINVAL; |
1276 | } |
1277 | |
1278 | task = rpc_run_task(&task_setup_data); |
1279 | if (IS_ERR(ptr: task)) |
1280 | return PTR_ERR(ptr: task); |
1281 | status = task->tk_status; |
1282 | rpc_put_task(task); |
1283 | return status; |
1284 | } |
1285 | EXPORT_SYMBOL_GPL(rpc_call_sync); |
1286 | |
1287 | /** |
1288 | * rpc_call_async - Perform an asynchronous RPC call |
1289 | * @clnt: pointer to RPC client |
1290 | * @msg: RPC call parameters |
1291 | * @flags: RPC call flags |
1292 | * @tk_ops: RPC call ops |
1293 | * @data: user call data |
1294 | */ |
1295 | int |
1296 | rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, |
1297 | const struct rpc_call_ops *tk_ops, void *data) |
1298 | { |
1299 | struct rpc_task *task; |
1300 | struct rpc_task_setup task_setup_data = { |
1301 | .rpc_client = clnt, |
1302 | .rpc_message = msg, |
1303 | .callback_ops = tk_ops, |
1304 | .callback_data = data, |
1305 | .flags = flags|RPC_TASK_ASYNC, |
1306 | }; |
1307 | |
1308 | task = rpc_run_task(&task_setup_data); |
1309 | if (IS_ERR(ptr: task)) |
1310 | return PTR_ERR(ptr: task); |
1311 | rpc_put_task(task); |
1312 | return 0; |
1313 | } |
1314 | EXPORT_SYMBOL_GPL(rpc_call_async); |
1315 | |
1316 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
1317 | static void call_bc_encode(struct rpc_task *task); |
1318 | |
1319 | /** |
1320 | * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run |
1321 | * rpc_execute against it |
1322 | * @req: RPC request |
1323 | * @timeout: timeout values to use for this task |
1324 | */ |
1325 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, |
1326 | struct rpc_timeout *timeout) |
1327 | { |
1328 | struct rpc_task *task; |
1329 | struct rpc_task_setup task_setup_data = { |
1330 | .callback_ops = &rpc_default_ops, |
1331 | .flags = RPC_TASK_SOFTCONN | |
1332 | RPC_TASK_NO_RETRANS_TIMEOUT, |
1333 | }; |
1334 | |
1335 | dprintk("RPC: rpc_run_bc_task req= %p\n" , req); |
1336 | /* |
1337 | * Create an rpc_task to send the data |
1338 | */ |
1339 | task = rpc_new_task(&task_setup_data); |
1340 | if (IS_ERR(ptr: task)) { |
1341 | xprt_free_bc_request(req); |
1342 | return task; |
1343 | } |
1344 | |
1345 | xprt_init_bc_request(req, task, to: timeout); |
1346 | |
1347 | task->tk_action = call_bc_encode; |
1348 | atomic_inc(v: &task->tk_count); |
1349 | WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); |
1350 | rpc_execute(task); |
1351 | |
1352 | dprintk("RPC: rpc_run_bc_task: task= %p\n" , task); |
1353 | return task; |
1354 | } |
1355 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
1356 | |
1357 | /** |
1358 | * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages |
1359 | * @req: RPC request to prepare |
1360 | * @pages: vector of struct page pointers |
1361 | * @base: offset in first page where receive should start, in bytes |
1362 | * @len: expected size of the upper layer data payload, in bytes |
1363 | * @hdrsize: expected size of upper layer reply header, in XDR words |
1364 | * |
1365 | */ |
1366 | void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, |
1367 | unsigned int base, unsigned int len, |
1368 | unsigned int hdrsize) |
1369 | { |
1370 | hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign; |
1371 | |
1372 | xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); |
1373 | trace_rpc_xdr_reply_pages(task: req->rq_task, xdr: &req->rq_rcv_buf); |
1374 | } |
1375 | EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages); |
1376 | |
/*
 * rpc_call_start - point a task at the start of the call state machine.
 * The FSM begins at call_start when the task is next executed.
 */
void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
1383 | |
1384 | /** |
1385 | * rpc_peeraddr - extract remote peer address from clnt's xprt |
1386 | * @clnt: RPC client structure |
1387 | * @buf: target buffer |
1388 | * @bufsize: length of target buffer |
1389 | * |
1390 | * Returns the number of bytes that are actually in the stored address. |
1391 | */ |
1392 | size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) |
1393 | { |
1394 | size_t bytes; |
1395 | struct rpc_xprt *xprt; |
1396 | |
1397 | rcu_read_lock(); |
1398 | xprt = rcu_dereference(clnt->cl_xprt); |
1399 | |
1400 | bytes = xprt->addrlen; |
1401 | if (bytes > bufsize) |
1402 | bytes = bufsize; |
1403 | memcpy(buf, &xprt->addr, bytes); |
1404 | rcu_read_unlock(); |
1405 | |
1406 | return bytes; |
1407 | } |
1408 | EXPORT_SYMBOL_GPL(rpc_peeraddr); |
1409 | |
1410 | /** |
1411 | * rpc_peeraddr2str - return remote peer address in printable format |
1412 | * @clnt: RPC client structure |
1413 | * @format: address format |
1414 | * |
1415 | * NB: the lifetime of the memory referenced by the returned pointer is |
1416 | * the same as the rpc_xprt itself. As long as the caller uses this |
1417 | * pointer, it must hold the RCU read lock. |
1418 | */ |
1419 | const char *rpc_peeraddr2str(struct rpc_clnt *clnt, |
1420 | enum rpc_display_format_t format) |
1421 | { |
1422 | struct rpc_xprt *xprt; |
1423 | |
1424 | xprt = rcu_dereference(clnt->cl_xprt); |
1425 | |
1426 | if (xprt->address_strings[format] != NULL) |
1427 | return xprt->address_strings[format]; |
1428 | else |
1429 | return "unprintable" ; |
1430 | } |
1431 | EXPORT_SYMBOL_GPL(rpc_peeraddr2str); |
1432 | |
/* NOTE(review): despite the "_loopback" names, both of these hold the
 * wildcard address (INADDR_ANY / IN6ADDR_ANY_INIT), letting the stack
 * choose the local source address when binding. */
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family = AF_INET,
	.sin_addr.s_addr = htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family = AF_INET6,
	.sin6_addr = IN6ADDR_ANY_INIT,
};
1442 | |
1443 | /* |
1444 | * Try a getsockname() on a connected datagram socket. Using a |
1445 | * connected datagram socket prevents leaving a socket in TIME_WAIT. |
1446 | * This conserves the ephemeral port number space. |
1447 | * |
1448 | * Returns zero and fills in "buf" if successful; otherwise, a |
1449 | * negative errno is returned. |
1450 | */ |
1451 | static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, |
1452 | struct sockaddr *buf) |
1453 | { |
1454 | struct socket *sock; |
1455 | int err; |
1456 | |
1457 | err = __sock_create(net, family: sap->sa_family, |
1458 | type: SOCK_DGRAM, IPPROTO_UDP, res: &sock, kern: 1); |
1459 | if (err < 0) { |
1460 | dprintk("RPC: can't create UDP socket (%d)\n" , err); |
1461 | goto out; |
1462 | } |
1463 | |
1464 | switch (sap->sa_family) { |
1465 | case AF_INET: |
1466 | err = kernel_bind(sock, |
1467 | addr: (struct sockaddr *)&rpc_inaddr_loopback, |
1468 | addrlen: sizeof(rpc_inaddr_loopback)); |
1469 | break; |
1470 | case AF_INET6: |
1471 | err = kernel_bind(sock, |
1472 | addr: (struct sockaddr *)&rpc_in6addr_loopback, |
1473 | addrlen: sizeof(rpc_in6addr_loopback)); |
1474 | break; |
1475 | default: |
1476 | err = -EAFNOSUPPORT; |
1477 | goto out_release; |
1478 | } |
1479 | if (err < 0) { |
1480 | dprintk("RPC: can't bind UDP socket (%d)\n" , err); |
1481 | goto out_release; |
1482 | } |
1483 | |
1484 | err = kernel_connect(sock, addr: sap, addrlen: salen, flags: 0); |
1485 | if (err < 0) { |
1486 | dprintk("RPC: can't connect UDP socket (%d)\n" , err); |
1487 | goto out_release; |
1488 | } |
1489 | |
1490 | err = kernel_getsockname(sock, addr: buf); |
1491 | if (err < 0) { |
1492 | dprintk("RPC: getsockname failed (%d)\n" , err); |
1493 | goto out_release; |
1494 | } |
1495 | |
1496 | err = 0; |
1497 | if (buf->sa_family == AF_INET6) { |
1498 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; |
1499 | sin6->sin6_scope_id = 0; |
1500 | } |
1501 | dprintk("RPC: %s succeeded\n" , __func__); |
1502 | |
1503 | out_release: |
1504 | sock_release(sock); |
1505 | out: |
1506 | return err; |
1507 | } |
1508 | |
1509 | /* |
1510 | * Scraping a connected socket failed, so we don't have a useable |
1511 | * local address. Fallback: generate an address that will prevent |
1512 | * the server from calling us back. |
1513 | * |
1514 | * Returns zero and fills in "buf" if successful; otherwise, a |
1515 | * negative errno is returned. |
1516 | */ |
1517 | static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen) |
1518 | { |
1519 | switch (family) { |
1520 | case AF_INET: |
1521 | if (buflen < sizeof(rpc_inaddr_loopback)) |
1522 | return -EINVAL; |
1523 | memcpy(buf, &rpc_inaddr_loopback, |
1524 | sizeof(rpc_inaddr_loopback)); |
1525 | break; |
1526 | case AF_INET6: |
1527 | if (buflen < sizeof(rpc_in6addr_loopback)) |
1528 | return -EINVAL; |
1529 | memcpy(buf, &rpc_in6addr_loopback, |
1530 | sizeof(rpc_in6addr_loopback)); |
1531 | break; |
1532 | default: |
1533 | dprintk("RPC: %s: address family not supported\n" , |
1534 | __func__); |
1535 | return -EAFNOSUPPORT; |
1536 | } |
1537 | dprintk("RPC: %s: succeeded\n" , __func__); |
1538 | return 0; |
1539 | } |
1540 | |
1541 | /** |
1542 | * rpc_localaddr - discover local endpoint address for an RPC client |
1543 | * @clnt: RPC client structure |
1544 | * @buf: target buffer |
1545 | * @buflen: size of target buffer, in bytes |
1546 | * |
1547 | * Returns zero and fills in "buf" and "buflen" if successful; |
1548 | * otherwise, a negative errno is returned. |
1549 | * |
1550 | * This works even if the underlying transport is not currently connected, |
1551 | * or if the upper layer never previously provided a source address. |
1552 | * |
1553 | * The result of this function call is transient: multiple calls in |
1554 | * succession may give different results, depending on how local |
1555 | * networking configuration changes over time. |
1556 | */ |
1557 | int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen) |
1558 | { |
1559 | struct sockaddr_storage address; |
1560 | struct sockaddr *sap = (struct sockaddr *)&address; |
1561 | struct rpc_xprt *xprt; |
1562 | struct net *net; |
1563 | size_t salen; |
1564 | int err; |
1565 | |
1566 | rcu_read_lock(); |
1567 | xprt = rcu_dereference(clnt->cl_xprt); |
1568 | salen = xprt->addrlen; |
1569 | memcpy(sap, &xprt->addr, salen); |
1570 | net = get_net(net: xprt->xprt_net); |
1571 | rcu_read_unlock(); |
1572 | |
1573 | rpc_set_port(sap, port: 0); |
1574 | err = rpc_sockname(net, sap, salen, buf); |
1575 | put_net(net); |
1576 | if (err != 0) |
1577 | /* Couldn't discover local address, return ANYADDR */ |
1578 | return rpc_anyaddr(family: sap->sa_family, buf, buflen); |
1579 | return 0; |
1580 | } |
1581 | EXPORT_SYMBOL_GPL(rpc_localaddr); |
1582 | |
/*
 * rpc_setbufsize - request transport socket buffer sizes for a client
 * @clnt: RPC client
 * @sndsize: requested send buffer size, in bytes
 * @rcvsize: requested receive buffer size, in bytes
 *
 * Forwards to the transport's set_buffer_size op, if it provides one.
 */
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);
1595 | |
1596 | /** |
1597 | * rpc_net_ns - Get the network namespace for this RPC client |
1598 | * @clnt: RPC client to query |
1599 | * |
1600 | */ |
1601 | struct net *rpc_net_ns(struct rpc_clnt *clnt) |
1602 | { |
1603 | struct net *ret; |
1604 | |
1605 | rcu_read_lock(); |
1606 | ret = rcu_dereference(clnt->cl_xprt)->xprt_net; |
1607 | rcu_read_unlock(); |
1608 | return ret; |
1609 | } |
1610 | EXPORT_SYMBOL_GPL(rpc_net_ns); |
1611 | |
1612 | /** |
1613 | * rpc_max_payload - Get maximum payload size for a transport, in bytes |
1614 | * @clnt: RPC client to query |
1615 | * |
1616 | * For stream transports, this is one RPC record fragment (see RFC |
1617 | * 1831), as we don't support multi-record requests yet. For datagram |
1618 | * transports, this is the size of an IP packet minus the IP, UDP, and |
1619 | * RPC header sizes. |
1620 | */ |
1621 | size_t rpc_max_payload(struct rpc_clnt *clnt) |
1622 | { |
1623 | size_t ret; |
1624 | |
1625 | rcu_read_lock(); |
1626 | ret = rcu_dereference(clnt->cl_xprt)->max_payload; |
1627 | rcu_read_unlock(); |
1628 | return ret; |
1629 | } |
1630 | EXPORT_SYMBOL_GPL(rpc_max_payload); |
1631 | |
1632 | /** |
1633 | * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes |
1634 | * @clnt: RPC client to query |
1635 | */ |
1636 | size_t rpc_max_bc_payload(struct rpc_clnt *clnt) |
1637 | { |
1638 | struct rpc_xprt *xprt; |
1639 | size_t ret; |
1640 | |
1641 | rcu_read_lock(); |
1642 | xprt = rcu_dereference(clnt->cl_xprt); |
1643 | ret = xprt->ops->bc_maxpayload(xprt); |
1644 | rcu_read_unlock(); |
1645 | return ret; |
1646 | } |
1647 | EXPORT_SYMBOL_GPL(rpc_max_bc_payload); |
1648 | |
1649 | unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt) |
1650 | { |
1651 | struct rpc_xprt *xprt; |
1652 | unsigned int ret; |
1653 | |
1654 | rcu_read_lock(); |
1655 | xprt = rcu_dereference(clnt->cl_xprt); |
1656 | ret = xprt->ops->bc_num_slots(xprt); |
1657 | rcu_read_unlock(); |
1658 | return ret; |
1659 | } |
1660 | EXPORT_SYMBOL_GPL(rpc_num_bc_slots); |
1661 | |
1662 | /** |
1663 | * rpc_force_rebind - force transport to check that remote port is unchanged |
1664 | * @clnt: client to rebind |
1665 | * |
1666 | */ |
1667 | void rpc_force_rebind(struct rpc_clnt *clnt) |
1668 | { |
1669 | if (clnt->cl_autobind) { |
1670 | rcu_read_lock(); |
1671 | xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); |
1672 | rcu_read_unlock(); |
1673 | } |
1674 | } |
1675 | EXPORT_SYMBOL_GPL(rpc_force_rebind); |
1676 | |
/*
 * Reset the task's status fields and rewind its FSM to @action.
 * Returns 1 so callers can write "return __rpc_restart_call(...)"
 * from an exit handler to signal a restart.
 */
static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
	return 1;
}
1685 | |
1686 | /* |
1687 | * Restart an (async) RPC call. Usually called from within the |
1688 | * exit handler. |
1689 | */ |
1690 | int |
1691 | rpc_restart_call(struct rpc_task *task) |
1692 | { |
1693 | return __rpc_restart_call(task, action: call_start); |
1694 | } |
1695 | EXPORT_SYMBOL_GPL(rpc_restart_call); |
1696 | |
1697 | /* |
1698 | * Restart an (async) RPC call from the call_prepare state. |
1699 | * Usually called from within the exit handler. |
1700 | */ |
1701 | int |
1702 | rpc_restart_call_prepare(struct rpc_task *task) |
1703 | { |
1704 | if (task->tk_ops->rpc_call_prepare != NULL) |
1705 | return __rpc_restart_call(task, action: rpc_prepare_task); |
1706 | return rpc_restart_call(task); |
1707 | } |
1708 | EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); |
1709 | |
1710 | const char |
1711 | *rpc_proc_name(const struct rpc_task *task) |
1712 | { |
1713 | const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
1714 | |
1715 | if (proc) { |
1716 | if (proc->p_name) |
1717 | return proc->p_name; |
1718 | else |
1719 | return "NULL" ; |
1720 | } else |
1721 | return "no proc" ; |
1722 | } |
1723 | |
/* Record @rpc_status on the task and terminate it with @tk_status. */
static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
	rpc_task_set_rpc_status(task, rpc_status);
	rpc_exit(task, tk_status);
}

/* Terminate the task with the same value for tk_status and rpc_status. */
static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}
1737 | |
1738 | /* |
1739 | * 0. Initial state |
1740 | * |
1741 | * Other FSM states can be visited zero or more times, but |
1742 | * this state is visited exactly once for each RPC. |
1743 | */ |
1744 | static void |
1745 | call_start(struct rpc_task *task) |
1746 | { |
1747 | struct rpc_clnt *clnt = task->tk_client; |
1748 | int idx = task->tk_msg.rpc_proc->p_statidx; |
1749 | |
1750 | trace_rpc_request(task); |
1751 | |
1752 | if (task->tk_client->cl_shutdown) { |
1753 | rpc_call_rpcerror(task, status: -EIO); |
1754 | return; |
1755 | } |
1756 | |
1757 | /* Increment call count (version might not be valid for ping) */ |
1758 | if (clnt->cl_program->version[clnt->cl_vers]) |
1759 | clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; |
1760 | clnt->cl_stats->rpccnt++; |
1761 | task->tk_action = call_reserve; |
1762 | rpc_task_set_transport(task, clnt); |
1763 | } |
1764 | |
1765 | /* |
1766 | * 1. Reserve an RPC call slot |
1767 | */ |
1768 | static void |
1769 | call_reserve(struct rpc_task *task) |
1770 | { |
1771 | task->tk_status = 0; |
1772 | task->tk_action = call_reserveresult; |
1773 | xprt_reserve(task); |
1774 | } |
1775 | |
1776 | static void call_retry_reserve(struct rpc_task *task); |
1777 | |
1778 | /* |
1779 | * 1b. Grok the result of xprt_reserve() |
1780 | */ |
1781 | static void |
1782 | call_reserveresult(struct rpc_task *task) |
1783 | { |
1784 | int status = task->tk_status; |
1785 | |
1786 | /* |
1787 | * After a call to xprt_reserve(), we must have either |
1788 | * a request slot or else an error status. |
1789 | */ |
1790 | task->tk_status = 0; |
1791 | if (status >= 0) { |
1792 | if (task->tk_rqstp) { |
1793 | task->tk_action = call_refresh; |
1794 | return; |
1795 | } |
1796 | |
1797 | rpc_call_rpcerror(task, status: -EIO); |
1798 | return; |
1799 | } |
1800 | |
1801 | switch (status) { |
1802 | case -ENOMEM: |
1803 | rpc_delay(task, HZ >> 2); |
1804 | fallthrough; |
1805 | case -EAGAIN: /* woken up; retry */ |
1806 | task->tk_action = call_retry_reserve; |
1807 | return; |
1808 | default: |
1809 | rpc_call_rpcerror(task, status); |
1810 | } |
1811 | } |
1812 | |
1813 | /* |
1814 | * 1c. Retry reserving an RPC call slot |
1815 | */ |
1816 | static void |
1817 | call_retry_reserve(struct rpc_task *task) |
1818 | { |
1819 | task->tk_status = 0; |
1820 | task->tk_action = call_reserveresult; |
1821 | xprt_retry_reserve(task); |
1822 | } |
1823 | |
1824 | /* |
1825 | * 2. Bind and/or refresh the credentials |
1826 | */ |
1827 | static void |
1828 | call_refresh(struct rpc_task *task) |
1829 | { |
1830 | task->tk_action = call_refreshresult; |
1831 | task->tk_status = 0; |
1832 | task->tk_client->cl_stats->rpcauthrefresh++; |
1833 | rpcauth_refreshcred(task); |
1834 | } |
1835 | |
1836 | /* |
1837 | * 2a. Process the results of a credential refresh |
1838 | */ |
1839 | static void |
1840 | call_refreshresult(struct rpc_task *task) |
1841 | { |
1842 | int status = task->tk_status; |
1843 | |
1844 | task->tk_status = 0; |
1845 | task->tk_action = call_refresh; |
1846 | switch (status) { |
1847 | case 0: |
1848 | if (rpcauth_uptodatecred(task)) { |
1849 | task->tk_action = call_allocate; |
1850 | return; |
1851 | } |
1852 | /* Use rate-limiting and a max number of retries if refresh |
1853 | * had status 0 but failed to update the cred. |
1854 | */ |
1855 | fallthrough; |
1856 | case -ETIMEDOUT: |
1857 | rpc_delay(task, 3*HZ); |
1858 | fallthrough; |
1859 | case -EAGAIN: |
1860 | status = -EACCES; |
1861 | fallthrough; |
1862 | case -EKEYEXPIRED: |
1863 | if (!task->tk_cred_retry) |
1864 | break; |
1865 | task->tk_cred_retry--; |
1866 | trace_rpc_retry_refresh_status(task); |
1867 | return; |
1868 | case -ENOMEM: |
1869 | rpc_delay(task, HZ >> 4); |
1870 | return; |
1871 | } |
1872 | trace_rpc_refresh_status(task); |
1873 | rpc_call_rpcerror(task, status); |
1874 | } |
1875 | |
1876 | /* |
1877 | * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. |
1878 | * (Note: buffer memory is freed in xprt_release). |
1879 | */ |
1880 | static void |
1881 | call_allocate(struct rpc_task *task) |
1882 | { |
1883 | const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; |
1884 | struct rpc_rqst *req = task->tk_rqstp; |
1885 | struct rpc_xprt *xprt = req->rq_xprt; |
1886 | const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
1887 | int status; |
1888 | |
1889 | task->tk_status = 0; |
1890 | task->tk_action = call_encode; |
1891 | |
1892 | if (req->rq_buffer) |
1893 | return; |
1894 | |
1895 | if (proc->p_proc != 0) { |
1896 | BUG_ON(proc->p_arglen == 0); |
1897 | if (proc->p_decode != NULL) |
1898 | BUG_ON(proc->p_replen == 0); |
1899 | } |
1900 | |
1901 | /* |
1902 | * Calculate the size (in quads) of the RPC call |
1903 | * and reply headers, and convert both values |
1904 | * to byte sizes. |
1905 | */ |
1906 | req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) + |
1907 | proc->p_arglen; |
1908 | req->rq_callsize <<= 2; |
1909 | /* |
1910 | * Note: the reply buffer must at minimum allocate enough space |
1911 | * for the 'struct accepted_reply' from RFC5531. |
1912 | */ |
1913 | req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \ |
1914 | max_t(size_t, proc->p_replen, 2); |
1915 | req->rq_rcvsize <<= 2; |
1916 | |
1917 | status = xprt->ops->buf_alloc(task); |
1918 | trace_rpc_buf_alloc(task, status); |
1919 | if (status == 0) |
1920 | return; |
1921 | if (status != -ENOMEM) { |
1922 | rpc_call_rpcerror(task, status); |
1923 | return; |
1924 | } |
1925 | |
1926 | if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { |
1927 | task->tk_action = call_allocate; |
1928 | rpc_delay(task, HZ>>4); |
1929 | return; |
1930 | } |
1931 | |
1932 | rpc_call_rpcerror(task, status: -ERESTARTSYS); |
1933 | } |
1934 | |
/*
 * True when the request must (re-)run XDR encoding: it is not already
 * queued for transmission, and either it has never been sent, retrans
 * timeouts are enabled, or the transport requires a retransmit.
 */
static int
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}
1943 | |
1944 | static void |
1945 | rpc_xdr_encode(struct rpc_task *task) |
1946 | { |
1947 | struct rpc_rqst *req = task->tk_rqstp; |
1948 | struct xdr_stream xdr; |
1949 | |
1950 | xdr_buf_init(buf: &req->rq_snd_buf, |
1951 | start: req->rq_buffer, |
1952 | len: req->rq_callsize); |
1953 | xdr_buf_init(buf: &req->rq_rcv_buf, |
1954 | start: req->rq_rbuffer, |
1955 | len: req->rq_rcvsize); |
1956 | |
1957 | req->rq_reply_bytes_recvd = 0; |
1958 | req->rq_snd_buf.head[0].iov_len = 0; |
1959 | xdr_init_encode(xdr: &xdr, buf: &req->rq_snd_buf, |
1960 | p: req->rq_snd_buf.head[0].iov_base, rqst: req); |
1961 | if (rpc_encode_header(task, xdr: &xdr)) |
1962 | return; |
1963 | |
1964 | task->tk_status = rpcauth_wrap_req(task, xdr: &xdr); |
1965 | } |
1966 | |
1967 | /* |
1968 | * 3. Encode arguments of an RPC call |
1969 | */ |
1970 | static void |
1971 | call_encode(struct rpc_task *task) |
1972 | { |
1973 | if (!rpc_task_need_encode(task)) |
1974 | goto out; |
1975 | |
1976 | /* Dequeue task from the receive queue while we're encoding */ |
1977 | xprt_request_dequeue_xprt(task); |
1978 | /* Encode here so that rpcsec_gss can use correct sequence number. */ |
1979 | rpc_xdr_encode(task); |
1980 | /* Add task to reply queue before transmission to avoid races */ |
1981 | if (task->tk_status == 0 && rpc_reply_expected(task)) |
1982 | task->tk_status = xprt_request_enqueue_receive(task); |
1983 | /* Did the encode result in an error condition? */ |
1984 | if (task->tk_status != 0) { |
1985 | /* Was the error nonfatal? */ |
1986 | switch (task->tk_status) { |
1987 | case -EAGAIN: |
1988 | case -ENOMEM: |
1989 | rpc_delay(task, HZ >> 4); |
1990 | break; |
1991 | case -EKEYEXPIRED: |
1992 | if (!task->tk_cred_retry) { |
1993 | rpc_call_rpcerror(task, status: task->tk_status); |
1994 | } else { |
1995 | task->tk_action = call_refresh; |
1996 | task->tk_cred_retry--; |
1997 | trace_rpc_retry_refresh_status(task); |
1998 | } |
1999 | break; |
2000 | default: |
2001 | rpc_call_rpcerror(task, status: task->tk_status); |
2002 | } |
2003 | return; |
2004 | } |
2005 | |
2006 | xprt_request_enqueue_transmit(task); |
2007 | out: |
2008 | task->tk_action = call_transmit; |
2009 | /* Check that the connection is OK */ |
2010 | if (!xprt_bound(xprt: task->tk_xprt)) |
2011 | task->tk_action = call_bind; |
2012 | else if (!xprt_connected(xprt: task->tk_xprt)) |
2013 | task->tk_action = call_connect; |
2014 | } |
2015 | |
2016 | /* |
2017 | * Helpers to check if the task was already transmitted, and |
2018 | * to take action when that is the case. |
2019 | */ |
2020 | static bool |
2021 | rpc_task_transmitted(struct rpc_task *task) |
2022 | { |
2023 | return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); |
2024 | } |
2025 | |
/*
 * The request has already been transmitted: release the transport
 * write lock and proceed to processing the transmission result.
 */
static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}
2032 | |
2033 | /* |
2034 | * 4. Get the server port number if not yet set |
2035 | */ |
2036 | static void |
2037 | call_bind(struct rpc_task *task) |
2038 | { |
2039 | struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; |
2040 | |
2041 | if (rpc_task_transmitted(task)) { |
2042 | rpc_task_handle_transmitted(task); |
2043 | return; |
2044 | } |
2045 | |
2046 | if (xprt_bound(xprt)) { |
2047 | task->tk_action = call_connect; |
2048 | return; |
2049 | } |
2050 | |
2051 | task->tk_action = call_bind_status; |
2052 | if (!xprt_prepare_transmit(task)) |
2053 | return; |
2054 | |
2055 | xprt->ops->rpcbind(task); |
2056 | } |
2057 | |
2058 | /* |
2059 | * 4a. Sort out bind result |
2060 | */ |
2061 | static void |
2062 | call_bind_status(struct rpc_task *task) |
2063 | { |
2064 | struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; |
2065 | int status = -EIO; |
2066 | |
2067 | if (rpc_task_transmitted(task)) { |
2068 | rpc_task_handle_transmitted(task); |
2069 | return; |
2070 | } |
2071 | |
2072 | if (task->tk_status >= 0) |
2073 | goto out_next; |
2074 | if (xprt_bound(xprt)) { |
2075 | task->tk_status = 0; |
2076 | goto out_next; |
2077 | } |
2078 | |
2079 | switch (task->tk_status) { |
2080 | case -ENOMEM: |
2081 | rpc_delay(task, HZ >> 2); |
2082 | goto retry_timeout; |
2083 | case -EACCES: |
2084 | trace_rpcb_prog_unavail_err(task); |
2085 | /* fail immediately if this is an RPC ping */ |
2086 | if (task->tk_msg.rpc_proc->p_proc == 0) { |
2087 | status = -EOPNOTSUPP; |
2088 | break; |
2089 | } |
2090 | rpc_delay(task, 3*HZ); |
2091 | goto retry_timeout; |
2092 | case -ENOBUFS: |
2093 | rpc_delay(task, HZ >> 2); |
2094 | goto retry_timeout; |
2095 | case -EAGAIN: |
2096 | goto retry_timeout; |
2097 | case -ETIMEDOUT: |
2098 | trace_rpcb_timeout_err(task); |
2099 | goto retry_timeout; |
2100 | case -EPFNOSUPPORT: |
2101 | /* server doesn't support any rpcbind version we know of */ |
2102 | trace_rpcb_bind_version_err(task); |
2103 | break; |
2104 | case -EPROTONOSUPPORT: |
2105 | trace_rpcb_bind_version_err(task); |
2106 | goto retry_timeout; |
2107 | case -ECONNREFUSED: /* connection problems */ |
2108 | case -ECONNRESET: |
2109 | case -ECONNABORTED: |
2110 | case -ENOTCONN: |
2111 | case -EHOSTDOWN: |
2112 | case -ENETDOWN: |
2113 | case -EHOSTUNREACH: |
2114 | case -ENETUNREACH: |
2115 | case -EPIPE: |
2116 | trace_rpcb_unreachable_err(task); |
2117 | if (!RPC_IS_SOFTCONN(task)) { |
2118 | rpc_delay(task, 5*HZ); |
2119 | goto retry_timeout; |
2120 | } |
2121 | status = task->tk_status; |
2122 | break; |
2123 | default: |
2124 | trace_rpcb_unrecognized_err(task); |
2125 | } |
2126 | |
2127 | rpc_call_rpcerror(task, status); |
2128 | return; |
2129 | out_next: |
2130 | task->tk_action = call_connect; |
2131 | return; |
2132 | retry_timeout: |
2133 | task->tk_status = 0; |
2134 | task->tk_action = call_bind; |
2135 | rpc_check_timeout(task); |
2136 | } |
2137 | |
2138 | /* |
2139 | * 4b. Connect to the RPC server |
2140 | */ |
2141 | static void |
2142 | call_connect(struct rpc_task *task) |
2143 | { |
2144 | struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; |
2145 | |
2146 | if (rpc_task_transmitted(task)) { |
2147 | rpc_task_handle_transmitted(task); |
2148 | return; |
2149 | } |
2150 | |
2151 | if (xprt_connected(xprt)) { |
2152 | task->tk_action = call_transmit; |
2153 | return; |
2154 | } |
2155 | |
2156 | task->tk_action = call_connect_status; |
2157 | if (task->tk_status < 0) |
2158 | return; |
2159 | if (task->tk_flags & RPC_TASK_NOCONNECT) { |
2160 | rpc_call_rpcerror(task, status: -ENOTCONN); |
2161 | return; |
2162 | } |
2163 | if (!xprt_prepare_transmit(task)) |
2164 | return; |
2165 | xprt_connect(task); |
2166 | } |
2167 | |
2168 | /* |
2169 | * 4c. Sort out connect result |
2170 | */ |
2171 | static void |
2172 | call_connect_status(struct rpc_task *task) |
2173 | { |
2174 | struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; |
2175 | struct rpc_clnt *clnt = task->tk_client; |
2176 | int status = task->tk_status; |
2177 | |
2178 | if (rpc_task_transmitted(task)) { |
2179 | rpc_task_handle_transmitted(task); |
2180 | return; |
2181 | } |
2182 | |
2183 | trace_rpc_connect_status(task); |
2184 | |
2185 | if (task->tk_status == 0) { |
2186 | clnt->cl_stats->netreconn++; |
2187 | goto out_next; |
2188 | } |
2189 | if (xprt_connected(xprt)) { |
2190 | task->tk_status = 0; |
2191 | goto out_next; |
2192 | } |
2193 | |
2194 | task->tk_status = 0; |
2195 | switch (status) { |
2196 | case -ECONNREFUSED: |
2197 | case -ECONNRESET: |
2198 | /* A positive refusal suggests a rebind is needed. */ |
2199 | if (RPC_IS_SOFTCONN(task)) |
2200 | break; |
2201 | if (clnt->cl_autobind) { |
2202 | rpc_force_rebind(clnt); |
2203 | goto out_retry; |
2204 | } |
2205 | fallthrough; |
2206 | case -ECONNABORTED: |
2207 | case -ENETDOWN: |
2208 | case -ENETUNREACH: |
2209 | case -EHOSTUNREACH: |
2210 | case -EPIPE: |
2211 | case -EPROTO: |
2212 | xprt_conditional_disconnect(xprt: task->tk_rqstp->rq_xprt, |
2213 | cookie: task->tk_rqstp->rq_connect_cookie); |
2214 | if (RPC_IS_SOFTCONN(task)) |
2215 | break; |
2216 | /* retry with existing socket, after a delay */ |
2217 | rpc_delay(task, 3*HZ); |
2218 | fallthrough; |
2219 | case -EADDRINUSE: |
2220 | case -ENOTCONN: |
2221 | case -EAGAIN: |
2222 | case -ETIMEDOUT: |
2223 | if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) && |
2224 | (task->tk_flags & RPC_TASK_MOVEABLE) && |
2225 | test_bit(XPRT_REMOVE, &xprt->state)) { |
2226 | struct rpc_xprt *saved = task->tk_xprt; |
2227 | struct rpc_xprt_switch *xps; |
2228 | |
2229 | xps = rpc_clnt_xprt_switch_get(clnt); |
2230 | if (xps->xps_nxprts > 1) { |
2231 | long value; |
2232 | |
2233 | xprt_release(task); |
2234 | value = atomic_long_dec_return(v: &xprt->queuelen); |
2235 | if (value == 0) |
2236 | rpc_xprt_switch_remove_xprt(xps, xprt: saved, |
2237 | offline: true); |
2238 | xprt_put(xprt: saved); |
2239 | task->tk_xprt = NULL; |
2240 | task->tk_action = call_start; |
2241 | } |
2242 | xprt_switch_put(xps); |
2243 | if (!task->tk_xprt) |
2244 | goto out; |
2245 | } |
2246 | goto out_retry; |
2247 | case -ENOBUFS: |
2248 | rpc_delay(task, HZ >> 2); |
2249 | goto out_retry; |
2250 | } |
2251 | rpc_call_rpcerror(task, status); |
2252 | return; |
2253 | out_next: |
2254 | task->tk_action = call_transmit; |
2255 | return; |
2256 | out_retry: |
2257 | /* Check for timeouts before looping back to call_bind */ |
2258 | task->tk_action = call_bind; |
2259 | out: |
2260 | rpc_check_timeout(task); |
2261 | } |
2262 | |
2263 | /* |
2264 | * 5. Transmit the RPC request, and wait for reply |
2265 | */ |
2266 | static void |
2267 | call_transmit(struct rpc_task *task) |
2268 | { |
2269 | if (rpc_task_transmitted(task)) { |
2270 | rpc_task_handle_transmitted(task); |
2271 | return; |
2272 | } |
2273 | |
2274 | task->tk_action = call_transmit_status; |
2275 | if (!xprt_prepare_transmit(task)) |
2276 | return; |
2277 | task->tk_status = 0; |
2278 | if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { |
2279 | if (!xprt_connected(xprt: task->tk_xprt)) { |
2280 | task->tk_status = -ENOTCONN; |
2281 | return; |
2282 | } |
2283 | xprt_transmit(task); |
2284 | } |
2285 | xprt_end_transmit(task); |
2286 | } |
2287 | |
2288 | /* |
2289 | * 5a. Handle cleanup after a transmission |
2290 | */ |
2291 | static void |
2292 | call_transmit_status(struct rpc_task *task) |
2293 | { |
2294 | task->tk_action = call_status; |
2295 | |
2296 | /* |
2297 | * Common case: success. Force the compiler to put this |
2298 | * test first. |
2299 | */ |
2300 | if (rpc_task_transmitted(task)) { |
2301 | task->tk_status = 0; |
2302 | xprt_request_wait_receive(task); |
2303 | return; |
2304 | } |
2305 | |
2306 | switch (task->tk_status) { |
2307 | default: |
2308 | break; |
2309 | case -EBADMSG: |
2310 | task->tk_status = 0; |
2311 | task->tk_action = call_encode; |
2312 | break; |
2313 | /* |
2314 | * Special cases: if we've been waiting on the |
2315 | * socket's write_space() callback, or if the |
2316 | * socket just returned a connection error, |
2317 | * then hold onto the transport lock. |
2318 | */ |
2319 | case -ENOMEM: |
2320 | case -ENOBUFS: |
2321 | rpc_delay(task, HZ>>2); |
2322 | fallthrough; |
2323 | case -EBADSLT: |
2324 | case -EAGAIN: |
2325 | task->tk_action = call_transmit; |
2326 | task->tk_status = 0; |
2327 | break; |
2328 | case -ECONNREFUSED: |
2329 | case -EHOSTDOWN: |
2330 | case -ENETDOWN: |
2331 | case -EHOSTUNREACH: |
2332 | case -ENETUNREACH: |
2333 | case -EPERM: |
2334 | if (RPC_IS_SOFTCONN(task)) { |
2335 | if (!task->tk_msg.rpc_proc->p_proc) |
2336 | trace_xprt_ping(xprt: task->tk_xprt, |
2337 | status: task->tk_status); |
2338 | rpc_call_rpcerror(task, status: task->tk_status); |
2339 | return; |
2340 | } |
2341 | fallthrough; |
2342 | case -ECONNRESET: |
2343 | case -ECONNABORTED: |
2344 | case -EADDRINUSE: |
2345 | case -ENOTCONN: |
2346 | case -EPIPE: |
2347 | task->tk_action = call_bind; |
2348 | task->tk_status = 0; |
2349 | break; |
2350 | } |
2351 | rpc_check_timeout(task); |
2352 | } |
2353 | |
2354 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
2355 | static void call_bc_transmit(struct rpc_task *task); |
2356 | static void call_bc_transmit_status(struct rpc_task *task); |
2357 | |
/*
 * Queue a backchannel reply for transmission; no XDR encoding is
 * performed here.
 */
static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}
2364 | |
2365 | /* |
2366 | * 5b. Send the backchannel RPC reply. On error, drop the reply. In |
2367 | * addition, disconnect on connectivity errors. |
2368 | */ |
2369 | static void |
2370 | call_bc_transmit(struct rpc_task *task) |
2371 | { |
2372 | task->tk_action = call_bc_transmit_status; |
2373 | if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { |
2374 | if (!xprt_prepare_transmit(task)) |
2375 | return; |
2376 | task->tk_status = 0; |
2377 | xprt_transmit(task); |
2378 | } |
2379 | xprt_end_transmit(task); |
2380 | } |
2381 | |
/*
 * 5c. Sort out the backchannel transmit result.  Backchannel replies
 * are best-effort: most errors are simply dropped, transient errors
 * are retried, and a timeout forces a disconnect so the forechannel
 * can reestablish the connection.
 */
static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	switch (task->tk_status) {
	case 0:
		/* Success */
	case -ENETDOWN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		break;
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		/* Transient: retry the transmission */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
					    req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	task->tk_action = rpc_exit_task;
}
2436 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
2437 | |
2438 | /* |
2439 | * 6. Sort out the RPC call status |
2440 | */ |
2441 | static void |
2442 | call_status(struct rpc_task *task) |
2443 | { |
2444 | struct rpc_clnt *clnt = task->tk_client; |
2445 | int status; |
2446 | |
2447 | if (!task->tk_msg.rpc_proc->p_proc) |
2448 | trace_xprt_ping(xprt: task->tk_xprt, status: task->tk_status); |
2449 | |
2450 | status = task->tk_status; |
2451 | if (status >= 0) { |
2452 | task->tk_action = call_decode; |
2453 | return; |
2454 | } |
2455 | |
2456 | trace_rpc_call_status(task); |
2457 | task->tk_status = 0; |
2458 | switch(status) { |
2459 | case -EHOSTDOWN: |
2460 | case -ENETDOWN: |
2461 | case -EHOSTUNREACH: |
2462 | case -ENETUNREACH: |
2463 | case -EPERM: |
2464 | if (RPC_IS_SOFTCONN(task)) |
2465 | goto out_exit; |
2466 | /* |
2467 | * Delay any retries for 3 seconds, then handle as if it |
2468 | * were a timeout. |
2469 | */ |
2470 | rpc_delay(task, 3*HZ); |
2471 | fallthrough; |
2472 | case -ETIMEDOUT: |
2473 | break; |
2474 | case -ECONNREFUSED: |
2475 | case -ECONNRESET: |
2476 | case -ECONNABORTED: |
2477 | case -ENOTCONN: |
2478 | rpc_force_rebind(clnt); |
2479 | break; |
2480 | case -EADDRINUSE: |
2481 | rpc_delay(task, 3*HZ); |
2482 | fallthrough; |
2483 | case -EPIPE: |
2484 | case -EAGAIN: |
2485 | break; |
2486 | case -ENFILE: |
2487 | case -ENOBUFS: |
2488 | case -ENOMEM: |
2489 | rpc_delay(task, HZ>>2); |
2490 | break; |
2491 | case -EIO: |
2492 | /* shutdown or soft timeout */ |
2493 | goto out_exit; |
2494 | default: |
2495 | if (clnt->cl_chatty) |
2496 | printk("%s: RPC call returned error %d\n" , |
2497 | clnt->cl_program->name, -status); |
2498 | goto out_exit; |
2499 | } |
2500 | task->tk_action = call_encode; |
2501 | rpc_check_timeout(task); |
2502 | return; |
2503 | out_exit: |
2504 | rpc_call_rpcerror(task, status); |
2505 | } |
2506 | |
2507 | static bool |
2508 | rpc_check_connected(const struct rpc_rqst *req) |
2509 | { |
2510 | /* No allocated request or transport? return true */ |
2511 | if (!req || !req->rq_xprt) |
2512 | return true; |
2513 | return xprt_connected(xprt: req->rq_xprt); |
2514 | } |
2515 | |
2516 | static void |
2517 | rpc_check_timeout(struct rpc_task *task) |
2518 | { |
2519 | struct rpc_clnt *clnt = task->tk_client; |
2520 | |
2521 | if (RPC_SIGNALLED(task)) |
2522 | return; |
2523 | |
2524 | if (xprt_adjust_timeout(req: task->tk_rqstp) == 0) |
2525 | return; |
2526 | |
2527 | trace_rpc_timeout_status(task); |
2528 | task->tk_timeouts++; |
2529 | |
2530 | if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(req: task->tk_rqstp)) { |
2531 | rpc_call_rpcerror(task, status: -ETIMEDOUT); |
2532 | return; |
2533 | } |
2534 | |
2535 | if (RPC_IS_SOFT(task)) { |
2536 | /* |
2537 | * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has |
2538 | * been sent, it should time out only if the transport |
2539 | * connection gets terminally broken. |
2540 | */ |
2541 | if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && |
2542 | rpc_check_connected(req: task->tk_rqstp)) |
2543 | return; |
2544 | |
2545 | if (clnt->cl_chatty) { |
2546 | pr_notice_ratelimited( |
2547 | "%s: server %s not responding, timed out\n" , |
2548 | clnt->cl_program->name, |
2549 | task->tk_xprt->servername); |
2550 | } |
2551 | if (task->tk_flags & RPC_TASK_TIMEOUT) |
2552 | rpc_call_rpcerror(task, status: -ETIMEDOUT); |
2553 | else |
2554 | __rpc_call_rpcerror(task, tk_status: -EIO, rpc_status: -ETIMEDOUT); |
2555 | return; |
2556 | } |
2557 | |
2558 | if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { |
2559 | task->tk_flags |= RPC_CALL_MAJORSEEN; |
2560 | if (clnt->cl_chatty) { |
2561 | pr_notice_ratelimited( |
2562 | "%s: server %s not responding, still trying\n" , |
2563 | clnt->cl_program->name, |
2564 | task->tk_xprt->servername); |
2565 | } |
2566 | } |
2567 | rpc_force_rebind(clnt); |
2568 | /* |
2569 | * Did our request time out due to an RPCSEC_GSS out-of-sequence |
2570 | * event? RFC2203 requires the server to drop all such requests. |
2571 | */ |
2572 | rpcauth_invalcred(task); |
2573 | } |
2574 | |
2575 | /* |
2576 | * 7. Decode the RPC reply |
2577 | */ |
2578 | static void |
2579 | call_decode(struct rpc_task *task) |
2580 | { |
2581 | struct rpc_clnt *clnt = task->tk_client; |
2582 | struct rpc_rqst *req = task->tk_rqstp; |
2583 | struct xdr_stream xdr; |
2584 | int err; |
2585 | |
2586 | if (!task->tk_msg.rpc_proc->p_decode) { |
2587 | task->tk_action = rpc_exit_task; |
2588 | return; |
2589 | } |
2590 | |
2591 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { |
2592 | if (clnt->cl_chatty) { |
2593 | pr_notice_ratelimited("%s: server %s OK\n" , |
2594 | clnt->cl_program->name, |
2595 | task->tk_xprt->servername); |
2596 | } |
2597 | task->tk_flags &= ~RPC_CALL_MAJORSEEN; |
2598 | } |
2599 | |
2600 | /* |
2601 | * Did we ever call xprt_complete_rqst()? If not, we should assume |
2602 | * the message is incomplete. |
2603 | */ |
2604 | err = -EAGAIN; |
2605 | if (!req->rq_reply_bytes_recvd) |
2606 | goto out; |
2607 | |
2608 | /* Ensure that we see all writes made by xprt_complete_rqst() |
2609 | * before it changed req->rq_reply_bytes_recvd. |
2610 | */ |
2611 | smp_rmb(); |
2612 | |
2613 | req->rq_rcv_buf.len = req->rq_private_buf.len; |
2614 | trace_rpc_xdr_recvfrom(task, xdr: &req->rq_rcv_buf); |
2615 | |
2616 | /* Check that the softirq receive buffer is valid */ |
2617 | WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, |
2618 | sizeof(req->rq_rcv_buf)) != 0); |
2619 | |
2620 | xdr_init_decode(xdr: &xdr, buf: &req->rq_rcv_buf, |
2621 | p: req->rq_rcv_buf.head[0].iov_base, rqst: req); |
2622 | err = rpc_decode_header(task, xdr: &xdr); |
2623 | out: |
2624 | switch (err) { |
2625 | case 0: |
2626 | task->tk_action = rpc_exit_task; |
2627 | task->tk_status = rpcauth_unwrap_resp(task, xdr: &xdr); |
2628 | xdr_finish_decode(xdr: &xdr); |
2629 | return; |
2630 | case -EAGAIN: |
2631 | task->tk_status = 0; |
2632 | if (task->tk_client->cl_discrtry) |
2633 | xprt_conditional_disconnect(xprt: req->rq_xprt, |
2634 | cookie: req->rq_connect_cookie); |
2635 | task->tk_action = call_encode; |
2636 | rpc_check_timeout(task); |
2637 | break; |
2638 | case -EKEYREJECTED: |
2639 | task->tk_action = call_reserve; |
2640 | rpc_check_timeout(task); |
2641 | rpcauth_invalcred(task); |
2642 | /* Ensure we obtain a new XID if we retry! */ |
2643 | xprt_release(task); |
2644 | } |
2645 | } |
2646 | |
2647 | static int |
2648 | (struct rpc_task *task, struct xdr_stream *xdr) |
2649 | { |
2650 | struct rpc_clnt *clnt = task->tk_client; |
2651 | struct rpc_rqst *req = task->tk_rqstp; |
2652 | __be32 *p; |
2653 | int error; |
2654 | |
2655 | error = -EMSGSIZE; |
2656 | p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); |
2657 | if (!p) |
2658 | goto out_fail; |
2659 | *p++ = req->rq_xid; |
2660 | *p++ = rpc_call; |
2661 | *p++ = cpu_to_be32(RPC_VERSION); |
2662 | *p++ = cpu_to_be32(clnt->cl_prog); |
2663 | *p++ = cpu_to_be32(clnt->cl_vers); |
2664 | *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); |
2665 | |
2666 | error = rpcauth_marshcred(task, xdr); |
2667 | if (error < 0) |
2668 | goto out_fail; |
2669 | return 0; |
2670 | out_fail: |
2671 | trace_rpc_bad_callhdr(task); |
2672 | rpc_call_rpcerror(task, status: error); |
2673 | return error; |
2674 | } |
2675 | |
2676 | static noinline int |
2677 | (struct rpc_task *task, struct xdr_stream *xdr) |
2678 | { |
2679 | struct rpc_clnt *clnt = task->tk_client; |
2680 | int error; |
2681 | __be32 *p; |
2682 | |
2683 | /* RFC-1014 says that the representation of XDR data must be a |
2684 | * multiple of four bytes |
2685 | * - if it isn't pointer subtraction in the NFS client may give |
2686 | * undefined results |
2687 | */ |
2688 | if (task->tk_rqstp->rq_rcv_buf.len & 3) |
2689 | goto out_unparsable; |
2690 | |
2691 | p = xdr_inline_decode(xdr, nbytes: 3 * sizeof(*p)); |
2692 | if (!p) |
2693 | goto out_unparsable; |
2694 | p++; /* skip XID */ |
2695 | if (*p++ != rpc_reply) |
2696 | goto out_unparsable; |
2697 | if (*p++ != rpc_msg_accepted) |
2698 | goto out_msg_denied; |
2699 | |
2700 | error = rpcauth_checkverf(task, xdr); |
2701 | if (error) |
2702 | goto out_verifier; |
2703 | |
2704 | p = xdr_inline_decode(xdr, nbytes: sizeof(*p)); |
2705 | if (!p) |
2706 | goto out_unparsable; |
2707 | switch (*p) { |
2708 | case rpc_success: |
2709 | return 0; |
2710 | case rpc_prog_unavail: |
2711 | trace_rpc__prog_unavail(task); |
2712 | error = -EPFNOSUPPORT; |
2713 | goto out_err; |
2714 | case rpc_prog_mismatch: |
2715 | trace_rpc__prog_mismatch(task); |
2716 | error = -EPROTONOSUPPORT; |
2717 | goto out_err; |
2718 | case rpc_proc_unavail: |
2719 | trace_rpc__proc_unavail(task); |
2720 | error = -EOPNOTSUPP; |
2721 | goto out_err; |
2722 | case rpc_garbage_args: |
2723 | case rpc_system_err: |
2724 | trace_rpc__garbage_args(task); |
2725 | error = -EIO; |
2726 | break; |
2727 | default: |
2728 | goto out_unparsable; |
2729 | } |
2730 | |
2731 | out_garbage: |
2732 | clnt->cl_stats->rpcgarbage++; |
2733 | if (task->tk_garb_retry) { |
2734 | task->tk_garb_retry--; |
2735 | task->tk_action = call_encode; |
2736 | return -EAGAIN; |
2737 | } |
2738 | out_err: |
2739 | rpc_call_rpcerror(task, status: error); |
2740 | return error; |
2741 | |
2742 | out_unparsable: |
2743 | trace_rpc__unparsable(task); |
2744 | error = -EIO; |
2745 | goto out_garbage; |
2746 | |
2747 | out_verifier: |
2748 | trace_rpc_bad_verifier(task); |
2749 | switch (error) { |
2750 | case -EPROTONOSUPPORT: |
2751 | goto out_err; |
2752 | case -EACCES: |
2753 | /* Re-encode with a fresh cred */ |
2754 | fallthrough; |
2755 | default: |
2756 | goto out_garbage; |
2757 | } |
2758 | |
2759 | out_msg_denied: |
2760 | error = -EACCES; |
2761 | p = xdr_inline_decode(xdr, nbytes: sizeof(*p)); |
2762 | if (!p) |
2763 | goto out_unparsable; |
2764 | switch (*p++) { |
2765 | case rpc_auth_error: |
2766 | break; |
2767 | case rpc_mismatch: |
2768 | trace_rpc__mismatch(task); |
2769 | error = -EPROTONOSUPPORT; |
2770 | goto out_err; |
2771 | default: |
2772 | goto out_unparsable; |
2773 | } |
2774 | |
2775 | p = xdr_inline_decode(xdr, nbytes: sizeof(*p)); |
2776 | if (!p) |
2777 | goto out_unparsable; |
2778 | switch (*p++) { |
2779 | case rpc_autherr_rejectedcred: |
2780 | case rpc_autherr_rejectedverf: |
2781 | case rpcsec_gsserr_credproblem: |
2782 | case rpcsec_gsserr_ctxproblem: |
2783 | rpcauth_invalcred(task); |
2784 | if (!task->tk_cred_retry) |
2785 | break; |
2786 | task->tk_cred_retry--; |
2787 | trace_rpc__stale_creds(task); |
2788 | return -EKEYREJECTED; |
2789 | case rpc_autherr_badcred: |
2790 | case rpc_autherr_badverf: |
2791 | /* possibly garbled cred/verf? */ |
2792 | if (!task->tk_garb_retry) |
2793 | break; |
2794 | task->tk_garb_retry--; |
2795 | trace_rpc__bad_creds(task); |
2796 | task->tk_action = call_encode; |
2797 | return -EAGAIN; |
2798 | case rpc_autherr_tooweak: |
2799 | trace_rpc__auth_tooweak(task); |
2800 | pr_warn("RPC: server %s requires stronger authentication.\n" , |
2801 | task->tk_xprt->servername); |
2802 | break; |
2803 | default: |
2804 | goto out_unparsable; |
2805 | } |
2806 | goto out_err; |
2807 | } |
2808 | |
/* Encoder for the NULL procedure: the call carries no arguments. */
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
				const void *obj)
{
}
2813 | |
/* Decoder for the NULL procedure: the reply carries no result body. */
static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
			       void *obj)
{
	return 0;
}
2819 | |
/* Procedure 0 (NULL): no arguments, and the reply is decoded as empty. */
static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
2824 | |
/* NULL procedure with no decode op: used when no reply is processed
 * (see rpc_ping_noreply()). */
static const struct rpc_procinfo rpcproc_null_noreply = {
	.p_encode = rpcproc_encode_null,
};
2828 | |
/*
 * Task preparation for NULL calls: clear RPC_TASK_NO_RETRANS_TIMEOUT
 * so retransmit timeouts apply, then start the call state machine.
 */
static void
rpc_null_call_prepare(struct rpc_task *task, void *data)
{
	task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
	rpc_call_start(task);
}
2835 | |
/* Default callback operations for NULL procedure calls */
static const struct rpc_call_ops rpc_null_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_default_callback,
};
2840 | |
/*
 * Launch a NULL procedure call on @clnt, optionally pinned to @xprt,
 * using @cred.  The task runs with soft semantics and NULL credentials;
 * @ops (or the default rpc_null_ops) and @data supply the callbacks.
 *
 * Returns the running task — the caller must release it with
 * rpc_put_task() — or an ERR_PTR on failure.
 */
static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = ops ?: &rpc_null_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			 RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}
2862 | |
/**
 * rpc_call_null - issue a NULL procedure call on @clnt
 * @clnt: RPC client
 * @cred: credential to use, or NULL
 * @flags: RPC task flags
 *
 * Returns the task (caller must release it with rpc_put_task()) or an
 * ERR_PTR on failure.
 */
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
2868 | |
2869 | static int rpc_ping(struct rpc_clnt *clnt) |
2870 | { |
2871 | struct rpc_task *task; |
2872 | int status; |
2873 | |
2874 | if (clnt->cl_auth->au_ops->ping) |
2875 | return clnt->cl_auth->au_ops->ping(clnt); |
2876 | |
2877 | task = rpc_call_null_helper(clnt, NULL, NULL, flags: 0, NULL, NULL); |
2878 | if (IS_ERR(ptr: task)) |
2879 | return PTR_ERR(ptr: task); |
2880 | status = task->tk_status; |
2881 | rpc_put_task(task); |
2882 | return status; |
2883 | } |
2884 | |
2885 | static int rpc_ping_noreply(struct rpc_clnt *clnt) |
2886 | { |
2887 | struct rpc_message msg = { |
2888 | .rpc_proc = &rpcproc_null_noreply, |
2889 | }; |
2890 | struct rpc_task_setup task_setup_data = { |
2891 | .rpc_client = clnt, |
2892 | .rpc_message = &msg, |
2893 | .callback_ops = &rpc_null_ops, |
2894 | .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, |
2895 | }; |
2896 | struct rpc_task *task; |
2897 | int status; |
2898 | |
2899 | task = rpc_run_task(&task_setup_data); |
2900 | if (IS_ERR(ptr: task)) |
2901 | return PTR_ERR(ptr: task); |
2902 | status = task->tk_status; |
2903 | rpc_put_task(task); |
2904 | return status; |
2905 | } |
2906 | |
/*
 * Calldata for the asynchronous transport-probing NULL call: holds
 * references to the xprt switch and the transport under test.
 */
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};
2911 | |
2912 | static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) |
2913 | { |
2914 | struct rpc_cb_add_xprt_calldata *data = calldata; |
2915 | |
2916 | if (task->tk_status == 0) |
2917 | rpc_xprt_switch_add_xprt(xps: data->xps, xprt: data->xprt); |
2918 | } |
2919 | |
2920 | static void rpc_cb_add_xprt_release(void *calldata) |
2921 | { |
2922 | struct rpc_cb_add_xprt_calldata *data = calldata; |
2923 | |
2924 | xprt_put(xprt: data->xprt); |
2925 | xprt_switch_put(xps: data->xps); |
2926 | kfree(objp: data); |
2927 | } |
2928 | |
/* Callback operations for the transport-probing NULL call */
static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_prepare = rpc_null_call_prepare,
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};
2934 | |
2935 | /** |
2936 | * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt |
2937 | * @clnt: pointer to struct rpc_clnt |
2938 | * @xps: pointer to struct rpc_xprt_switch, |
2939 | * @xprt: pointer struct rpc_xprt |
2940 | * @in_max_connect: pointer to the max_connect value for the passed in xprt transport |
2941 | */ |
2942 | int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, |
2943 | struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, |
2944 | void *in_max_connect) |
2945 | { |
2946 | struct rpc_cb_add_xprt_calldata *data; |
2947 | struct rpc_task *task; |
2948 | int max_connect = clnt->cl_max_connect; |
2949 | |
2950 | if (in_max_connect) |
2951 | max_connect = *(int *)in_max_connect; |
2952 | if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) { |
2953 | rcu_read_lock(); |
2954 | pr_warn("SUNRPC: reached max allowed number (%d) did not add " |
2955 | "transport to server: %s\n" , max_connect, |
2956 | rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); |
2957 | rcu_read_unlock(); |
2958 | return -EINVAL; |
2959 | } |
2960 | |
2961 | data = kmalloc(size: sizeof(*data), GFP_KERNEL); |
2962 | if (!data) |
2963 | return -ENOMEM; |
2964 | data->xps = xprt_switch_get(xps); |
2965 | data->xprt = xprt_get(xprt); |
2966 | if (rpc_xprt_switch_has_addr(xps: data->xps, sap: (struct sockaddr *)&xprt->addr)) { |
2967 | rpc_cb_add_xprt_release(calldata: data); |
2968 | goto success; |
2969 | } |
2970 | |
2971 | task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, |
2972 | ops: &rpc_cb_add_xprt_call_ops, data); |
2973 | if (IS_ERR(ptr: task)) |
2974 | return PTR_ERR(ptr: task); |
2975 | |
2976 | data->xps->xps_nunique_destaddr_xprts++; |
2977 | rpc_put_task(task); |
2978 | success: |
2979 | return 1; |
2980 | } |
2981 | EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); |
2982 | |
2983 | static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt, |
2984 | struct rpc_xprt *xprt, |
2985 | struct rpc_add_xprt_test *data) |
2986 | { |
2987 | struct rpc_task *task; |
2988 | int status = -EADDRINUSE; |
2989 | |
2990 | /* Test the connection */ |
2991 | task = rpc_call_null_helper(clnt, xprt, NULL, flags: 0, NULL, NULL); |
2992 | if (IS_ERR(ptr: task)) |
2993 | return PTR_ERR(ptr: task); |
2994 | |
2995 | status = task->tk_status; |
2996 | rpc_put_task(task); |
2997 | |
2998 | if (status < 0) |
2999 | return status; |
3000 | |
3001 | /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */ |
3002 | data->add_xprt_test(clnt, xprt, data->data); |
3003 | |
3004 | return 0; |
3005 | } |
3006 | |
3007 | /** |
3008 | * rpc_clnt_setup_test_and_add_xprt() |
3009 | * |
3010 | * This is an rpc_clnt_add_xprt setup() function which returns 1 so: |
3011 | * 1) caller of the test function must dereference the rpc_xprt_switch |
3012 | * and the rpc_xprt. |
3013 | * 2) test function must call rpc_xprt_switch_add_xprt, usually in |
3014 | * the rpc_call_done routine. |
3015 | * |
3016 | * Upon success (return of 1), the test function adds the new |
3017 | * transport to the rpc_clnt xprt switch |
3018 | * |
3019 | * @clnt: struct rpc_clnt to get the new transport |
3020 | * @xps: the rpc_xprt_switch to hold the new transport |
3021 | * @xprt: the rpc_xprt to test |
3022 | * @data: a struct rpc_add_xprt_test pointer that holds the test function |
3023 | * and test function call data |
3024 | */ |
3025 | int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, |
3026 | struct rpc_xprt_switch *xps, |
3027 | struct rpc_xprt *xprt, |
3028 | void *data) |
3029 | { |
3030 | int status = -EADDRINUSE; |
3031 | |
3032 | xprt = xprt_get(xprt); |
3033 | xprt_switch_get(xps); |
3034 | |
3035 | if (rpc_xprt_switch_has_addr(xps, sap: (struct sockaddr *)&xprt->addr)) |
3036 | goto out_err; |
3037 | |
3038 | status = rpc_clnt_add_xprt_helper(clnt, xprt, data); |
3039 | if (status < 0) |
3040 | goto out_err; |
3041 | |
3042 | status = 1; |
3043 | out_err: |
3044 | xprt_put(xprt); |
3045 | xprt_switch_put(xps); |
3046 | if (status < 0) |
3047 | pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not " |
3048 | "added\n" , status, |
3049 | xprt->address_strings[RPC_DISPLAY_ADDR]); |
3050 | /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ |
3051 | return status; |
3052 | } |
3053 | EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); |
3054 | |
3055 | /** |
3056 | * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt |
3057 | * @clnt: pointer to struct rpc_clnt |
3058 | * @xprtargs: pointer to struct xprt_create |
3059 | * @setup: callback to test and/or set up the connection |
3060 | * @data: pointer to setup function data |
3061 | * |
3062 | * Creates a new transport using the parameters set in args and |
3063 | * adds it to clnt. |
3064 | * If ping is set, then test that connectivity succeeds before |
3065 | * adding the new transport. |
3066 | * |
3067 | */ |
3068 | int rpc_clnt_add_xprt(struct rpc_clnt *clnt, |
3069 | struct xprt_create *xprtargs, |
3070 | int (*setup)(struct rpc_clnt *, |
3071 | struct rpc_xprt_switch *, |
3072 | struct rpc_xprt *, |
3073 | void *), |
3074 | void *data) |
3075 | { |
3076 | struct rpc_xprt_switch *xps; |
3077 | struct rpc_xprt *xprt; |
3078 | unsigned long connect_timeout; |
3079 | unsigned long reconnect_timeout; |
3080 | unsigned char resvport, reuseport; |
3081 | int ret = 0, ident; |
3082 | |
3083 | rcu_read_lock(); |
3084 | xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); |
3085 | xprt = xprt_iter_xprt(xpi: &clnt->cl_xpi); |
3086 | if (xps == NULL || xprt == NULL) { |
3087 | rcu_read_unlock(); |
3088 | xprt_switch_put(xps); |
3089 | return -EAGAIN; |
3090 | } |
3091 | resvport = xprt->resvport; |
3092 | reuseport = xprt->reuseport; |
3093 | connect_timeout = xprt->connect_timeout; |
3094 | reconnect_timeout = xprt->max_reconnect_timeout; |
3095 | ident = xprt->xprt_class->ident; |
3096 | rcu_read_unlock(); |
3097 | |
3098 | if (!xprtargs->ident) |
3099 | xprtargs->ident = ident; |
3100 | xprtargs->xprtsec = clnt->cl_xprtsec; |
3101 | xprt = xprt_create_transport(args: xprtargs); |
3102 | if (IS_ERR(ptr: xprt)) { |
3103 | ret = PTR_ERR(ptr: xprt); |
3104 | goto out_put_switch; |
3105 | } |
3106 | xprt->resvport = resvport; |
3107 | xprt->reuseport = reuseport; |
3108 | |
3109 | if (xprtargs->connect_timeout) |
3110 | connect_timeout = xprtargs->connect_timeout; |
3111 | if (xprtargs->reconnect_timeout) |
3112 | reconnect_timeout = xprtargs->reconnect_timeout; |
3113 | if (xprt->ops->set_connect_timeout != NULL) |
3114 | xprt->ops->set_connect_timeout(xprt, |
3115 | connect_timeout, |
3116 | reconnect_timeout); |
3117 | |
3118 | rpc_xprt_switch_set_roundrobin(xps); |
3119 | if (setup) { |
3120 | ret = setup(clnt, xps, xprt, data); |
3121 | if (ret != 0) |
3122 | goto out_put_xprt; |
3123 | } |
3124 | rpc_xprt_switch_add_xprt(xps, xprt); |
3125 | out_put_xprt: |
3126 | xprt_put(xprt); |
3127 | out_put_switch: |
3128 | xprt_switch_put(xps); |
3129 | return ret; |
3130 | } |
3131 | EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); |
3132 | |
3133 | static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt, |
3134 | struct rpc_xprt *xprt, |
3135 | struct rpc_add_xprt_test *data) |
3136 | { |
3137 | struct rpc_xprt *main_xprt; |
3138 | int status = 0; |
3139 | |
3140 | xprt_get(xprt); |
3141 | |
3142 | rcu_read_lock(); |
3143 | main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); |
3144 | status = rpc_cmp_addr_port(sap1: (struct sockaddr *)&xprt->addr, |
3145 | sap2: (struct sockaddr *)&main_xprt->addr); |
3146 | rcu_read_unlock(); |
3147 | xprt_put(xprt: main_xprt); |
3148 | if (status || !test_bit(XPRT_OFFLINE, &xprt->state)) |
3149 | goto out; |
3150 | |
3151 | status = rpc_clnt_add_xprt_helper(clnt, xprt, data); |
3152 | out: |
3153 | xprt_put(xprt); |
3154 | return status; |
3155 | } |
3156 | |
3157 | /* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking |
3158 | * @clnt rpc_clnt structure |
3159 | * |
3160 | * For each offlined transport found in the rpc_clnt structure call |
3161 | * the function rpc_xprt_probe_trunked() which will determine if this |
3162 | * transport still belongs to the trunking group. |
3163 | */ |
3164 | void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt, |
3165 | struct rpc_add_xprt_test *data) |
3166 | { |
3167 | struct rpc_xprt_iter xpi; |
3168 | int ret; |
3169 | |
3170 | ret = rpc_clnt_xprt_iter_offline_init(clnt, xpi: &xpi); |
3171 | if (ret) |
3172 | return; |
3173 | for (;;) { |
3174 | struct rpc_xprt *xprt = xprt_iter_get_next(xpi: &xpi); |
3175 | |
3176 | if (!xprt) |
3177 | break; |
3178 | ret = rpc_xprt_probe_trunked(clnt, xprt, data); |
3179 | xprt_put(xprt); |
3180 | if (ret < 0) |
3181 | break; |
3182 | xprt_iter_rewind(xpi: &xpi); |
3183 | } |
3184 | xprt_iter_destroy(xpi: &xpi); |
3185 | } |
3186 | EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts); |
3187 | |
3188 | static int rpc_xprt_offline(struct rpc_clnt *clnt, |
3189 | struct rpc_xprt *xprt, |
3190 | void *data) |
3191 | { |
3192 | struct rpc_xprt *main_xprt; |
3193 | struct rpc_xprt_switch *xps; |
3194 | int err = 0; |
3195 | |
3196 | xprt_get(xprt); |
3197 | |
3198 | rcu_read_lock(); |
3199 | main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); |
3200 | xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); |
3201 | err = rpc_cmp_addr_port(sap1: (struct sockaddr *)&xprt->addr, |
3202 | sap2: (struct sockaddr *)&main_xprt->addr); |
3203 | rcu_read_unlock(); |
3204 | xprt_put(xprt: main_xprt); |
3205 | if (err) |
3206 | goto out; |
3207 | |
3208 | if (wait_on_bit_lock(word: &xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { |
3209 | err = -EINTR; |
3210 | goto out; |
3211 | } |
3212 | xprt_set_offline_locked(xprt, xps); |
3213 | |
3214 | xprt_release_write(xprt, NULL); |
3215 | out: |
3216 | xprt_put(xprt); |
3217 | xprt_switch_put(xps); |
3218 | return err; |
3219 | } |
3220 | |
/* rpc_clnt_manage_trunked_xprts -- offline trunked transports
 * @clnt rpc_clnt structure
 *
 * For each active transport found in the rpc_clnt structure call
 * the function rpc_xprt_offline() which will identify trunked transports
 * and will mark them offline.
 */
void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt)
{
	/* rpc_xprt_offline() itself filters out non-matching transports */
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts);
3233 | |
/* Timeout values carried through rpc_clnt_iterate_for_each_xprt() to
 * rpc_xprt_set_connect_timeout() for every transport of a client.
 */
struct connect_timeout_data {
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
};
3238 | |
3239 | static int |
3240 | rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, |
3241 | struct rpc_xprt *xprt, |
3242 | void *data) |
3243 | { |
3244 | struct connect_timeout_data *timeo = data; |
3245 | |
3246 | if (xprt->ops->set_connect_timeout) |
3247 | xprt->ops->set_connect_timeout(xprt, |
3248 | timeo->connect_timeout, |
3249 | timeo->reconnect_timeout); |
3250 | return 0; |
3251 | } |
3252 | |
3253 | void |
3254 | rpc_set_connect_timeout(struct rpc_clnt *clnt, |
3255 | unsigned long connect_timeout, |
3256 | unsigned long reconnect_timeout) |
3257 | { |
3258 | struct connect_timeout_data timeout = { |
3259 | .connect_timeout = connect_timeout, |
3260 | .reconnect_timeout = reconnect_timeout, |
3261 | }; |
3262 | rpc_clnt_iterate_for_each_xprt(clnt, |
3263 | rpc_xprt_set_connect_timeout, |
3264 | &timeout); |
3265 | } |
3266 | EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); |
3267 | |
/* Mark @xprt online under @clnt's current transport switch. */
void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps = rpc_clnt_xprt_switch_get(clnt);

	xprt_set_online_locked(xprt, xps);
	xprt_switch_put(xps);
}
3276 | |
3277 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) |
3278 | { |
3279 | struct rpc_xprt_switch *xps; |
3280 | |
3281 | if (rpc_clnt_xprt_switch_has_addr(clnt, |
3282 | sap: (const struct sockaddr *)&xprt->addr)) { |
3283 | return rpc_clnt_xprt_set_online(clnt, xprt); |
3284 | } |
3285 | |
3286 | xps = rpc_clnt_xprt_switch_get(clnt); |
3287 | rpc_xprt_switch_add_xprt(xps, xprt); |
3288 | xprt_switch_put(xps); |
3289 | } |
3290 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); |
3291 | |
3292 | void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) |
3293 | { |
3294 | struct rpc_xprt_switch *xps; |
3295 | |
3296 | rcu_read_lock(); |
3297 | xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); |
3298 | rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), |
3299 | xprt, offline: 0); |
3300 | xps->xps_nunique_destaddr_xprts--; |
3301 | rcu_read_unlock(); |
3302 | } |
3303 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); |
3304 | |
3305 | bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, |
3306 | const struct sockaddr *sap) |
3307 | { |
3308 | struct rpc_xprt_switch *xps; |
3309 | bool ret; |
3310 | |
3311 | rcu_read_lock(); |
3312 | xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); |
3313 | ret = rpc_xprt_switch_has_addr(xps, sap); |
3314 | rcu_read_unlock(); |
3315 | return ret; |
3316 | } |
3317 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); |
3318 | |
3319 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
3320 | static void (void) |
3321 | { |
3322 | printk(KERN_INFO "-pid- flgs status -client- --rqstp- " |
3323 | "-timeout ---ops--\n" ); |
3324 | } |
3325 | |
3326 | static void rpc_show_task(const struct rpc_clnt *clnt, |
3327 | const struct rpc_task *task) |
3328 | { |
3329 | const char *rpc_waitq = "none" ; |
3330 | |
3331 | if (RPC_IS_QUEUED(task)) |
3332 | rpc_waitq = rpc_qname(q: task->tk_waitqueue); |
3333 | |
3334 | printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n" , |
3335 | task->tk_pid, task->tk_flags, task->tk_status, |
3336 | clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, |
3337 | clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), |
3338 | task->tk_action, rpc_waitq); |
3339 | } |
3340 | |
3341 | void rpc_show_tasks(struct net *net) |
3342 | { |
3343 | struct rpc_clnt *clnt; |
3344 | struct rpc_task *task; |
3345 | int = 0; |
3346 | struct sunrpc_net *sn = net_generic(net, id: sunrpc_net_id); |
3347 | |
3348 | spin_lock(lock: &sn->rpc_client_lock); |
3349 | list_for_each_entry(clnt, &sn->all_clients, cl_clients) { |
3350 | spin_lock(lock: &clnt->cl_lock); |
3351 | list_for_each_entry(task, &clnt->cl_tasks, tk_task) { |
3352 | if (!header) { |
3353 | rpc_show_header(); |
3354 | header++; |
3355 | } |
3356 | rpc_show_task(clnt, task); |
3357 | } |
3358 | spin_unlock(lock: &clnt->cl_lock); |
3359 | } |
3360 | spin_unlock(lock: &sn->rpc_client_lock); |
3361 | } |
3362 | #endif |
3363 | |
3364 | #if IS_ENABLED(CONFIG_SUNRPC_SWAP) |
/* Iterator callback: enable swap I/O on a single transport. */
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}
3372 | |
3373 | int |
3374 | rpc_clnt_swap_activate(struct rpc_clnt *clnt) |
3375 | { |
3376 | while (clnt != clnt->cl_parent) |
3377 | clnt = clnt->cl_parent; |
3378 | if (atomic_inc_return(v: &clnt->cl_swapper) == 1) |
3379 | return rpc_clnt_iterate_for_each_xprt(clnt, |
3380 | rpc_clnt_swap_activate_callback, NULL); |
3381 | return 0; |
3382 | } |
3383 | EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); |
3384 | |
/* Iterator callback: disable swap I/O on a single transport. */
static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}
3393 | |
3394 | void |
3395 | rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) |
3396 | { |
3397 | while (clnt != clnt->cl_parent) |
3398 | clnt = clnt->cl_parent; |
3399 | if (atomic_dec_if_positive(v: &clnt->cl_swapper) == 0) |
3400 | rpc_clnt_iterate_for_each_xprt(clnt, |
3401 | rpc_clnt_swap_deactivate_callback, NULL); |
3402 | } |
3403 | EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); |
3404 | #endif /* CONFIG_SUNRPC_SWAP */ |
3405 | |