// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */

#include <linux/errno.h>
#include <linux/math.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

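/*
 * CB_GETATTR: the server wants attributes of a file we hold a write
 * delegation for. Report the delegated size and change attribute
 * (bumped while dirty pages are outstanding), filtered through the
 * bitmap the server supplied.
 */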
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				      -ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode_get_ctime(inode);
	res->mtime = inode_get_mtime(inode);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

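/*
 * CB_RECALL: the server wants a delegation returned. Find the inode by
 * filehandle and hand the return off to an asynchronous worker, so the
 * callback reply is not blocked behind the DELEGRETURN itself.
 */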
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				     &args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			     &args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

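/* Try the stateid lookup first; fall back to the filehandle only if no
 * layout matched the stateid.
 */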
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

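/*
 * Handle CB_LAYOUTRECALL(FILE): look up the inode, start a layoutcommit
 * if one is pending, validate the recall stateid, and mark the matching
 * layout segments for return. Answering NFS4ERR_NOMATCHING_LAYOUT tells
 * the server we no longer hold the layout (the "forgetful" model).
 */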
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
					&args->cbl_stateid, -rv);
	return rv;
}

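/*
 * Handle CB_LAYOUTRECALL(FSID) and CB_LAYOUTRECALL(ALL) by destroying
 * all matching layouts outright; a forgetful client never returns
 * layouts on a bulk recall, so the reply is NFS4ERR_NOMATCHING_LAYOUT
 * unless the server needs to retry with NFS4ERR_DELAY.
 */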
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

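/*
 * CB_NOTIFY_DEVICEID: drop each notified deviceid from the deviceid
 * cache. The last layoutdriver lookup is reused across iterations,
 * since consecutive entries in practice tend to share a layout type.
 */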
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. On success the caller advances
 * the slot's sequence number.
 *
 * We don't yet implement a duplicate request cache; instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single-slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer
 * guarantees a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

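/*
 * CB_SEQUENCE: validate the session and backchannel slot, screen the
 * sequenceid for replays and misordering, and wait out any referring
 * calls before the rest of the compound is processed. Per RFC 5661
 * section 20.9.3 the slot's sequence number is only advanced once
 * every check has passed.
 */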
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				  &tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

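/* Reject CB_RECALL_ANY masks containing recall types we don't know */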
static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

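/*
 * CB_RECALL_ANY: the server is low on resources and asks us to shed
 * some state. Map the recall type mask onto returns of unused read
 * and/or write delegations and, for pNFS, onto layout recalls driven
 * by the state manager.
 */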
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

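/*
 * CB_NOTIFY_LOCK: a lock this client was waiting for may now be
 * available. Wake the tasks sleeping on the client's lock waitqueue,
 * passing the callback arguments as the wake key so waiters can
 * filter for their own lock owner.
 */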
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
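/* Copy the CB_OFFLOAD result into the waiting copy state */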
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
		       &args->wr_writeverf.verifier.data[0],
		       NFS4_VERIFIER_SIZE);
	}
}

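/*
 * CB_OFFLOAD: an asynchronous COPY on the server has completed. If the
 * stateid matches a pending copy, record the result and complete the
 * waiter; otherwise the callback beat the COPY reply, so stash the
 * result on pending_cb_stateids for the copy path to find later.
 */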
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	trace_nfs4_cb_offload(&args->coa_fh, &args->coa_stateid,
			      args->wr_count, args->error,
			      args->wr_writeverf.committed);
	return 0;
}
#endif /* CONFIG_NFS_V4_2 */
