1 | /* |
2 | * pNFS functions to call and manage layout drivers. |
3 | * |
4 | * Copyright (c) 2002 [year of first publication] |
5 | * The Regents of the University of Michigan |
6 | * All Rights Reserved |
7 | * |
8 | * Dean Hildebrand <dhildebz@umich.edu> |
9 | * |
10 | * Permission is granted to use, copy, create derivative works, and |
11 | * redistribute this software and such derivative works for any purpose, |
12 | * so long as the name of the University of Michigan is not used in |
13 | * any advertising or publicity pertaining to the use or distribution |
14 | * of this software without specific, written prior authorization. If |
15 | * the above copyright notice or any other identification of the |
16 | * University of Michigan is included in any copy of any portion of |
17 | * this software, then the disclaimer below must also be included. |
18 | * |
19 | * This software is provided as is, without representation or warranty |
20 | * of any kind either express or implied, including without limitation |
21 | * the implied warranties of merchantability, fitness for a particular |
22 | * purpose, or noninfringement. The Regents of the University of |
23 | * Michigan shall not be liable for any damages, including special, |
24 | * indirect, incidental, or consequential damages, with respect to any |
25 | * claim arising out of or in connection with the use of the software, |
26 | * even if it has been or is hereafter advised of the possibility of |
27 | * such damages. |
28 | */ |
29 | |
30 | #include <linux/nfs_fs.h> |
31 | #include <linux/nfs_page.h> |
32 | #include <linux/module.h> |
33 | #include <linux/sort.h> |
34 | #include "internal.h" |
35 | #include "pnfs.h" |
36 | #include "iostat.h" |
37 | #include "nfs4trace.h" |
38 | #include "delegation.h" |
39 | #include "nfs42.h" |
40 | #include "nfs4_fs.h" |
41 | |
42 | #define NFSDBG_FACILITY NFSDBG_PNFS |
43 | #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ) |
44 | |
45 | /* Locking: |
46 | * |
47 | * pnfs_spinlock: |
48 | * protects pnfs_modules_tbl. |
49 | */ |
50 | static DEFINE_SPINLOCK(pnfs_spinlock); |
51 | |
52 | /* |
53 | * pnfs_modules_tbl holds all pnfs modules |
54 | */ |
55 | static LIST_HEAD(pnfs_modules_tbl); |
56 | |
57 | static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo); |
58 | static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo, |
59 | struct list_head *free_me, |
60 | const struct pnfs_layout_range *range, |
61 | u32 seq); |
62 | static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg, |
63 | struct list_head *tmp_list); |
64 | |
65 | /* Return the registered pnfs layout driver module matching given id */ |
66 | static struct pnfs_layoutdriver_type * |
67 | find_pnfs_driver_locked(u32 id) |
68 | { |
69 | struct pnfs_layoutdriver_type *local; |
70 | |
71 | list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid) |
72 | if (local->id == id) |
73 | goto out; |
74 | local = NULL; |
75 | out: |
76 | dprintk("%s: Searching for id %u, found %p\n" , __func__, id, local); |
77 | return local; |
78 | } |
79 | |
80 | static struct pnfs_layoutdriver_type * |
81 | find_pnfs_driver(u32 id) |
82 | { |
83 | struct pnfs_layoutdriver_type *local; |
84 | |
	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
92 | return local; |
93 | } |
94 | |
95 | const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id) |
96 | { |
97 | return find_pnfs_driver(id); |
98 | } |
99 | |
100 | void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld) |
101 | { |
102 | if (ld) |
		module_put(ld->owner);
104 | } |
105 | |
106 | void |
107 | unset_pnfs_layoutdriver(struct nfs_server *nfss) |
108 | { |
109 | if (nfss->pnfs_curr_ld) { |
110 | if (nfss->pnfs_curr_ld->clear_layoutdriver) |
111 | nfss->pnfs_curr_ld->clear_layoutdriver(nfss); |
112 | /* Decrement the MDS count. Purge the deviceid cache if zero */ |
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
116 | } |
117 | nfss->pnfs_curr_ld = NULL; |
118 | } |
119 | |
120 | /* |
121 | * When the server sends a list of layout types, we choose one in the order |
122 | * given in the list below. |
123 | * |
124 | * FIXME: should this list be configurable in some fashion? module param? |
125 | * mount option? something else? |
126 | */ |
127 | static const u32 ld_prefs[] = { |
128 | LAYOUT_SCSI, |
129 | LAYOUT_BLOCK_VOLUME, |
130 | LAYOUT_OSD2_OBJECTS, |
131 | LAYOUT_FLEX_FILES, |
132 | LAYOUT_NFSV4_1_FILES, |
133 | 0 |
134 | }; |
135 | |
136 | static int |
137 | ld_cmp(const void *e1, const void *e2) |
138 | { |
139 | u32 ld1 = *((u32 *)e1); |
140 | u32 ld2 = *((u32 *)e2); |
141 | int i; |
142 | |
143 | for (i = 0; ld_prefs[i] != 0; i++) { |
144 | if (ld1 == ld_prefs[i]) |
145 | return -1; |
146 | |
147 | if (ld2 == ld_prefs[i]) |
148 | return 1; |
149 | } |
150 | return 0; |
151 | } |
152 | |
153 | /* |
154 | * Try to set the server's pnfs module to the pnfs layout type specified by id. |
155 | * Currently only one pNFS layout driver per filesystem is supported. |
156 | * |
157 | * @ids array of layout types supported by MDS. |
158 | */ |
159 | void |
160 | set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, |
161 | struct nfs_fsinfo *fsinfo) |
162 | { |
163 | struct pnfs_layoutdriver_type *ld_type = NULL; |
164 | u32 id; |
165 | int i; |
166 | |
167 | if (fsinfo->nlayouttypes == 0) |
168 | goto out_no_driver; |
169 | if (!(server->nfs_client->cl_exchange_flags & |
170 | (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { |
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
172 | __func__, server->nfs_client->cl_exchange_flags); |
173 | goto out_no_driver; |
174 | } |
175 | |
	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
			sizeof(*fsinfo->layouttype), ld_cmp, NULL);
178 | |
179 | for (i = 0; i < fsinfo->nlayouttypes; i++) { |
180 | id = fsinfo->layouttype[i]; |
181 | ld_type = find_pnfs_driver(id); |
182 | if (!ld_type) { |
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
184 | id); |
185 | ld_type = find_pnfs_driver(id); |
186 | } |
187 | if (ld_type) |
188 | break; |
189 | } |
190 | |
191 | if (!ld_type) { |
192 | dprintk("%s: No pNFS module found!\n" , __func__); |
193 | goto out_no_driver; |
194 | } |
195 | |
196 | server->pnfs_curr_ld = ld_type; |
197 | if (ld_type->set_layoutdriver |
198 | && ld_type->set_layoutdriver(server, mntfh)) { |
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
212 | server->pnfs_curr_ld = NULL; |
213 | } |
214 | |
215 | int |
216 | pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type) |
217 | { |
218 | int status = -EINVAL; |
219 | struct pnfs_layoutdriver_type *tmp; |
220 | |
221 | if (ld_type->id == 0) { |
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
223 | return status; |
224 | } |
225 | if (!ld_type->alloc_lseg || !ld_type->free_lseg) { |
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
228 | return status; |
229 | } |
230 | |
	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);
243 | |
244 | return status; |
245 | } |
246 | EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver); |
247 | |
248 | void |
249 | pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type) |
250 | { |
251 | dprintk("%s Deregistering id:%u\n" , __func__, ld_type->id); |
252 | spin_lock(lock: &pnfs_spinlock); |
253 | list_del(entry: &ld_type->pnfs_tblid); |
254 | spin_unlock(lock: &pnfs_spinlock); |
255 | } |
256 | EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver); |
257 | |
258 | /* |
259 | * pNFS client layout cache |
260 | */ |
261 | |
262 | /* Need to hold i_lock if caller does not already hold reference */ |
263 | void |
264 | pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo) |
265 | { |
	refcount_inc(&lo->plh_refcount);
267 | } |
268 | |
269 | static struct pnfs_layout_hdr * |
270 | pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags) |
271 | { |
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
273 | return ld->alloc_layout_hdr(ino, gfp_flags); |
274 | } |
275 | |
276 | static void |
277 | pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo) |
278 | { |
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_rcu(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_cred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
291 | } |
292 | |
293 | static void |
294 | pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo) |
295 | { |
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
298 | nfsi->layout = NULL; |
299 | /* Reset MDS Threshold I/O counters */ |
300 | nfsi->write_io = 0; |
301 | nfsi->read_io = 0; |
302 | } |
303 | |
304 | void |
305 | pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) |
306 | { |
307 | struct inode *inode; |
308 | unsigned long i_state; |
309 | |
310 | if (!lo) |
311 | return; |
312 | inode = lo->plh_inode; |
313 | pnfs_layoutreturn_before_put_layout_hdr(lo); |
314 | |
	if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		i_state = inode->i_state;
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
		/* Notify pnfs_destroy_layout_final() that we're done */
		if (i_state & (I_FREEING | I_CLEAR))
			wake_up_var(lo);
	}
325 | } |
326 | } |
327 | |
328 | static struct inode * |
329 | pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo) |
330 | { |
331 | struct inode *inode = igrab(lo->plh_inode); |
332 | if (inode) |
333 | return inode; |
	set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
335 | return NULL; |
336 | } |
337 | |
338 | /* |
339 | * Compare 2 layout stateid sequence ids, to see which is newer, |
340 | * taking into account wraparound issues. |
341 | */ |
342 | static bool pnfs_seqid_is_newer(u32 s1, u32 s2) |
343 | { |
344 | return (s32)(s1 - s2) > 0; |
345 | } |
346 | |
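/* Advance the layout barrier seqid, never moving it backwards */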
347 | static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq) |
348 | { |
	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
350 | lo->plh_barrier = newseq; |
351 | } |
352 | |
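/*
 * Record that this layout needs a LAYOUTRETURN: remember the iomode and
 * seqid to return, widening the iomode to IOMODE_ANY if both read and
 * read/write segments are affected.
 */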
353 | static void |
354 | pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode, |
355 | u32 seq) |
356 | { |
357 | if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode) |
358 | iomode = IOMODE_ANY; |
359 | lo->plh_return_iomode = iomode; |
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
361 | /* |
362 | * We must set lo->plh_return_seq to avoid livelocks with |
363 | * pnfs_layout_need_return() |
364 | */ |
365 | if (seq == 0) |
366 | seq = be32_to_cpu(lo->plh_stateid.seqid); |
	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
		lo->plh_return_seq = seq;
	pnfs_barrier_update(lo, seq);
370 | } |
371 | |
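/*
 * Reset the layoutreturn tracking info, then re-arm it for any
 * segments that are still marked NFS_LSEG_LAYOUTRETURN.
 */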
372 | static void |
373 | pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo) |
374 | { |
375 | struct pnfs_layout_segment *lseg; |
376 | lo->plh_return_iomode = 0; |
377 | lo->plh_return_seq = 0; |
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
383 | } |
384 | } |
385 | |
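/* Release the layoutreturn locks and wake up anyone waiting on them */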
386 | static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo) |
387 | { |
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
393 | } |
394 | |
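/* Clear per-segment return-on-close and layoutreturn state, dropping
 * the references that state was pinning */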
395 | static void |
396 | pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg, |
397 | struct list_head *free_me) |
398 | { |
	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
405 | } |
406 | |
407 | /* |
408 | * Update the seqid of a layout stateid after receiving |
409 | * NFS4ERR_OLD_STATEID |
410 | */ |
411 | bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst, |
412 | struct pnfs_layout_range *dst_range, |
413 | struct inode *inode) |
414 | { |
415 | struct pnfs_layout_hdr *lo; |
416 | struct pnfs_layout_range range = { |
417 | .iomode = IOMODE_ANY, |
418 | .offset = 0, |
419 | .length = NFS4_MAX_UINT64, |
420 | }; |
421 | bool ret = false; |
422 | LIST_HEAD(head); |
423 | int err; |
424 | |
	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo && pnfs_layout_is_valid(lo) &&
	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
		/* Is our call using the most recent seqid? If so, bump it */
		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
			nfs4_stateid_seqid_inc(dst);
			ret = true;
			goto out;
		}
		/* Try to update the seqid to the most recent */
		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
		if (err != -EBUSY) {
			dst->seqid = lo->plh_stateid.seqid;
			*dst_range = range;
			ret = true;
		}
	}
out:
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
446 | return ret; |
447 | } |
448 | |
449 | /* |
450 | * Mark a pnfs_layout_hdr and all associated layout segments as invalid |
451 | * |
452 | * In order to continue using the pnfs_layout_hdr, a full recovery |
453 | * is required. |
454 | * Note that caller must hold inode->i_lock. |
455 | */ |
456 | int |
457 | pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, |
458 | struct list_head *lseg_list) |
459 | { |
460 | struct pnfs_layout_range range = { |
461 | .iomode = IOMODE_ANY, |
462 | .offset = 0, |
463 | .length = NFS4_MAX_UINT64, |
464 | }; |
465 | struct pnfs_layout_segment *lseg, *next; |
466 | |
	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		pnfs_clear_lseg_state(lseg, lseg_list);
	pnfs_clear_layoutreturn_info(lo);
	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
	set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		pnfs_clear_layoutreturn_waitbit(lo);
	return !list_empty(&lo->plh_segs);
477 | } |
478 | |
479 | static int |
480 | pnfs_iomode_to_fail_bit(u32 iomode) |
481 | { |
482 | return iomode == IOMODE_RW ? |
483 | NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED; |
484 | } |
485 | |
486 | static void |
487 | pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit) |
488 | { |
489 | lo->plh_retry_timestamp = jiffies; |
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		refcount_inc(&lo->plh_refcount);
492 | } |
493 | |
494 | static void |
495 | pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit) |
496 | { |
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		refcount_dec(&lo->plh_refcount);
499 | } |
500 | |
501 | static void |
502 | pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode) |
503 | { |
504 | struct inode *inode = lo->plh_inode; |
505 | struct pnfs_layout_range range = { |
506 | .iomode = iomode, |
507 | .offset = 0, |
508 | .length = NFS4_MAX_UINT64, |
509 | }; |
510 | LIST_HEAD(head); |
511 | |
	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
519 | } |
520 | |
521 | static bool |
522 | pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode) |
523 | { |
524 | unsigned long start, end; |
525 | int fail_bit = pnfs_iomode_to_fail_bit(iomode); |
526 | |
527 | if (test_bit(fail_bit, &lo->plh_flags) == 0) |
528 | return false; |
529 | end = jiffies; |
530 | start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT; |
531 | if (!time_in_range(lo->plh_retry_timestamp, start, end)) { |
532 | /* It is time to retry the failed layoutgets */ |
533 | pnfs_layout_clear_fail_bit(lo, fail_bit); |
534 | return false; |
535 | } |
536 | return true; |
537 | } |
538 | |
539 | static void |
540 | pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg, |
541 | const struct pnfs_layout_range *range, |
542 | const nfs4_stateid *stateid) |
543 | { |
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	INIT_LIST_HEAD(&lseg->pls_commits);
	refcount_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
549 | lseg->pls_layout = lo; |
550 | lseg->pls_range = *range; |
551 | lseg->pls_seq = be32_to_cpu(stateid->seqid); |
552 | } |
553 | |
554 | static void pnfs_free_lseg(struct pnfs_layout_segment *lseg) |
555 | { |
556 | if (lseg != NULL) { |
557 | struct inode *inode = lseg->pls_layout->plh_inode; |
558 | NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg); |
559 | } |
560 | } |
561 | |
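/*
 * Unlink a segment from its layout header. When the last segment goes
 * away with no layoutreturn pending and no layoutgets outstanding, the
 * layout stateid is marked invalid so a fresh LAYOUTGET is required.
 */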
562 | static void |
563 | pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo, |
564 | struct pnfs_layout_segment *lseg) |
565 | { |
566 | WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); |
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	refcount_dec(&lo->plh_refcount);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return;
	if (list_empty(&lo->plh_segs) &&
	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		if (atomic_read(&lo->plh_outstanding) == 0)
			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
578 | } |
579 | } |
580 | |
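/*
 * Rather than freeing a segment that is marked for layoutreturn, park
 * it on plh_return_segs so that the pending LAYOUTRETURN covers it.
 */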
581 | static bool |
582 | pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo, |
583 | struct pnfs_layout_segment *lseg) |
584 | { |
	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
	    pnfs_layout_is_valid(lo)) {
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
589 | return true; |
590 | } |
591 | return false; |
592 | } |
593 | |
594 | void |
595 | pnfs_put_lseg(struct pnfs_layout_segment *lseg) |
596 | { |
597 | struct pnfs_layout_hdr *lo; |
598 | struct inode *inode; |
599 | |
600 | if (!lseg) |
601 | return; |
602 | |
603 | dprintk("%s: lseg %p ref %d valid %d\n" , __func__, lseg, |
604 | refcount_read(&lseg->pls_refcount), |
605 | test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); |
606 | |
607 | lo = lseg->pls_layout; |
608 | inode = lo->plh_inode; |
609 | |
	if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
			lseg = NULL;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
618 | } |
619 | } |
620 | EXPORT_SYMBOL_GPL(pnfs_put_lseg); |
621 | |
622 | /* |
623 | * is l2 fully contained in l1? |
624 | * start1 end1 |
625 | * [----------------------------------) |
626 | * start2 end2 |
627 | * [----------------) |
628 | */ |
629 | static bool |
630 | pnfs_lseg_range_contained(const struct pnfs_layout_range *l1, |
631 | const struct pnfs_layout_range *l2) |
632 | { |
633 | u64 start1 = l1->offset; |
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);
637 | |
638 | return (start1 <= start2) && (end1 >= end2); |
639 | } |
640 | |
641 | static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg, |
642 | struct list_head *tmp_list) |
643 | { |
	if (!refcount_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
648 | return true; |
649 | } |
650 | |
651 | /* Returns 1 if lseg is removed from list, 0 otherwise */ |
652 | static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, |
653 | struct list_head *tmp_list) |
654 | { |
655 | int rv = 0; |
656 | |
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			refcount_read(&lseg->pls_refcount));
664 | if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list)) |
665 | rv = 1; |
666 | } |
667 | return rv; |
668 | } |
669 | |
670 | static bool |
671 | pnfs_should_free_range(const struct pnfs_layout_range *lseg_range, |
672 | const struct pnfs_layout_range *recall_range) |
673 | { |
674 | return (recall_range->iomode == IOMODE_ANY || |
675 | lseg_range->iomode == recall_range->iomode) && |
		pnfs_lseg_range_intersecting(lseg_range, recall_range);
677 | } |
678 | |
679 | static bool |
680 | pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg, |
681 | const struct pnfs_layout_range *recall_range, |
682 | u32 seq) |
683 | { |
	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
		return false;
	if (recall_range == NULL)
		return true;
	return pnfs_should_free_range(&lseg->pls_range, recall_range);
689 | } |
690 | |
691 | /** |
692 | * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later |
693 | * @lo: layout header containing the lsegs |
694 | * @tmp_list: list head where doomed lsegs should go |
695 | * @recall_range: optional recall range argument to match (may be NULL) |
696 | * @seq: only invalidate lsegs obtained prior to this sequence (may be 0) |
697 | * |
698 | * Walk the list of lsegs in the layout header, and tear down any that should |
699 | * be destroyed. If "recall_range" is specified then the segment must match |
700 | * that range. If "seq" is non-zero, then only match segments that were handed |
701 | * out at or before that sequence. |
702 | * |
703 | * Returns number of matching invalid lsegs remaining in list after scanning |
704 | * it and purging them. |
705 | */ |
706 | int |
707 | pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, |
708 | struct list_head *tmp_list, |
709 | const struct pnfs_layout_range *recall_range, |
710 | u32 seq) |
711 | { |
712 | struct pnfs_layout_segment *lseg, *next; |
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			if (mark_lseg_invalid(lseg, tmp_list))
				continue;
			remaining++;
			pnfs_lseg_cancel_io(server, lseg);
		}
	dprintk("%s:Return %i\n", __func__, remaining);
732 | return remaining; |
733 | } |
734 | |
735 | static void |
736 | pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo, |
737 | struct list_head *free_me, |
738 | const struct pnfs_layout_range *range, |
739 | u32 seq) |
740 | { |
741 | struct pnfs_layout_segment *lseg, *next; |
742 | |
743 | list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) { |
		if (pnfs_match_lseg_recall(lseg, range, seq))
			list_move_tail(&lseg->pls_list, free_me);
746 | } |
747 | } |
748 | |
749 | /* note free_me must contain lsegs from a single layout_hdr */ |
750 | void |
751 | pnfs_free_lseg_list(struct list_head *free_me) |
752 | { |
753 | struct pnfs_layout_segment *lseg, *tmp; |
754 | |
	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
760 | pnfs_free_lseg(lseg); |
761 | } |
762 | } |
763 | |
764 | static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi) |
765 | { |
766 | struct pnfs_layout_hdr *lo; |
767 | LIST_HEAD(tmp_list); |
768 | |
	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		nfs_commit_inode(&nfsi->vfs_inode, 0);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
782 | return lo; |
783 | } |
784 | |
785 | void pnfs_destroy_layout(struct nfs_inode *nfsi) |
786 | { |
787 | __pnfs_destroy_layout(nfsi); |
788 | } |
789 | EXPORT_SYMBOL_GPL(pnfs_destroy_layout); |
790 | |
791 | static bool pnfs_layout_removed(struct nfs_inode *nfsi, |
792 | struct pnfs_layout_hdr *lo) |
793 | { |
794 | bool ret; |
795 | |
	spin_lock(&nfsi->vfs_inode.i_lock);
	ret = nfsi->layout != lo;
	spin_unlock(&nfsi->vfs_inode.i_lock);
799 | return ret; |
800 | } |
801 | |
802 | void pnfs_destroy_layout_final(struct nfs_inode *nfsi) |
803 | { |
804 | struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi); |
805 | |
806 | if (lo) |
807 | wait_var_event(lo, pnfs_layout_removed(nfsi, lo)); |
808 | } |
809 | |
810 | static bool |
811 | pnfs_layout_add_bulk_destroy_list(struct inode *inode, |
812 | struct list_head *layout_list) |
813 | { |
814 | struct pnfs_layout_hdr *lo; |
815 | bool ret = false; |
816 | |
	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
825 | return ret; |
826 | } |
827 | |
828 | /* Caller must hold rcu_read_lock and clp->cl_lock */ |
829 | static int |
830 | pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp, |
831 | struct nfs_server *server, |
832 | struct list_head *layout_list) |
833 | __must_hold(&clp->cl_lock) |
834 | __must_hold(RCU) |
835 | { |
836 | struct pnfs_layout_hdr *lo, *next; |
837 | struct inode *inode; |
838 | |
839 | list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) { |
840 | if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || |
841 | test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) || |
		    !list_empty(&lo->plh_bulk_destroy))
			continue;
		/* If the sb is being destroyed, just bail */
		if (!nfs_sb_active(server->super))
			break;
		inode = pnfs_grab_inode_layout_hdr(lo);
		if (inode != NULL) {
			if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
				list_del_rcu(&lo->plh_layouts);
			if (pnfs_layout_add_bulk_destroy_list(inode,
						layout_list))
				continue;
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
			iput(inode);
		} else {
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
		}
		nfs_sb_deactive(server->super);
		spin_lock(&clp->cl_lock);
863 | rcu_read_lock(); |
864 | return -EAGAIN; |
865 | } |
866 | return 0; |
867 | } |
868 | |
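/*
 * Commit, invalidate and release every layout on the bulk destroy list.
 * Returns -EAGAIN if any of the layouts still had segments in use.
 */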
869 | static int |
870 | pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list, |
871 | bool is_bulk_recall) |
872 | { |
873 | struct pnfs_layout_hdr *lo; |
874 | struct inode *inode; |
875 | LIST_HEAD(lseg_list); |
876 | int ret = 0; |
877 | |
	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		nfs_iput_and_deactive(inode);
900 | } |
901 | return ret; |
902 | } |
903 | |
904 | int |
905 | pnfs_destroy_layouts_byfsid(struct nfs_client *clp, |
906 | struct nfs_fsid *fsid, |
907 | bool is_recall) |
908 | { |
909 | struct nfs_server *server; |
910 | LIST_HEAD(layout_list); |
911 | |
	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
							     server,
							     &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
929 | } |
930 | |
931 | int |
932 | pnfs_destroy_layouts_byclid(struct nfs_client *clp, |
933 | bool is_recall) |
934 | { |
935 | struct nfs_server *server; |
936 | LIST_HEAD(layout_list); |
937 | |
	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
							     server,
							     &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
953 | } |
954 | |
955 | /* |
956 | * Called by the state manager to remove all layouts established under an |
957 | * expired lease. |
958 | */ |
959 | void |
960 | pnfs_destroy_all_layouts(struct nfs_client *clp) |
961 | { |
962 | nfs4_deviceid_mark_client_invalid(clp); |
963 | nfs4_deviceid_purge_client(clp); |
964 | |
	pnfs_destroy_layouts_byclid(clp, false);
966 | } |
967 | |
968 | static void |
969 | pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred) |
970 | { |
971 | const struct cred *old; |
972 | |
973 | if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) { |
974 | old = xchg(&lo->plh_lc_cred, get_cred(cred)); |
		put_cred(old);
976 | } |
977 | } |
978 | |
979 | /* update lo->plh_stateid with new if is more recent */ |
980 | void |
981 | pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, |
982 | const struct cred *cred, bool update_barrier) |
983 | { |
984 | u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid); |
985 | u32 newseq = be32_to_cpu(new->seqid); |
986 | |
	if (!pnfs_layout_is_valid(lo)) {
		pnfs_set_layout_cred(lo, cred);
		nfs4_stateid_copy(&lo->plh_stateid, new);
		lo->plh_barrier = newseq;
		pnfs_clear_layoutreturn_info(lo);
		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		return;
	}

	if (pnfs_seqid_is_newer(newseq, oldseq))
		nfs4_stateid_copy(&lo->plh_stateid, new);

	if (update_barrier) {
		pnfs_barrier_update(lo, newseq);
		return;
	}
	/*
	 * Because of wraparound, we want to keep the barrier
	 * "close" to the current seqids. We really only want to
	 * get here from a layoutget call.
	 */
	if (atomic_read(&lo->plh_outstanding) == 1)
		pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
1010 | } |
1011 | |
1012 | static bool |
1013 | pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo, |
1014 | const nfs4_stateid *stateid) |
1015 | { |
1016 | u32 seqid = be32_to_cpu(stateid->seqid); |
1017 | |
	return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
1019 | } |
1020 | |
/* Returns true if layoutgets are currently blocked for this layout */
1022 | static bool |
1023 | pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo) |
1024 | { |
1025 | return lo->plh_block_lgets || |
1026 | test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); |
1027 | } |
1028 | |
1029 | static struct nfs_server * |
1030 | pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx) |
1031 | { |
1032 | struct nfs_server *server; |
1033 | |
1034 | if (inode) { |
1035 | server = NFS_SERVER(inode); |
1036 | } else { |
		struct dentry *parent_dir = dget_parent(ctx->dentry);

		server = NFS_SERVER(parent_dir->d_inode);
		dput(parent_dir);
1040 | } |
1041 | return server; |
1042 | } |
1043 | |
1044 | static void nfs4_free_pages(struct page **pages, size_t size) |
1045 | { |
1046 | int i; |
1047 | |
1048 | if (!pages) |
1049 | return; |
1050 | |
1051 | for (i = 0; i < size; i++) { |
1052 | if (!pages[i]) |
1053 | break; |
1054 | __free_page(pages[i]); |
1055 | } |
	kfree(pages);
1057 | } |
1058 | |
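/* Allocate the page array for a layoutget reply, unwinding on failure */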
1059 | static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) |
1060 | { |
1061 | struct page **pages; |
1062 | int i; |
1063 | |
	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			nfs4_free_pages(pages, i);
1075 | return NULL; |
1076 | } |
1077 | } |
1078 | |
1079 | return pages; |
1080 | } |
1081 | |
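/*
 * Allocate and initialise the LAYOUTGET arguments: the reply buffer is
 * sized to the smaller of the layout driver's maximum response size and
 * the session maximum, and args.minlength is clamped against i_size
 * for IOMODE_READ requests.
 */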
1082 | static struct nfs4_layoutget * |
1083 | pnfs_alloc_init_layoutget_args(struct inode *ino, |
1084 | struct nfs_open_context *ctx, |
1085 | const nfs4_stateid *stateid, |
1086 | const struct pnfs_layout_range *range, |
1087 | gfp_t gfp_flags) |
1088 | { |
	struct nfs_server *server = pnfs_find_server(ino, ctx);
	size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
	size_t max_pages = max_response_pages(server);
	struct nfs4_layoutget *lgp;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
1097 | if (lgp == NULL) |
1098 | return NULL; |
1099 | |
1100 | if (max_reply_sz) { |
1101 | size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1102 | if (npages < max_pages) |
1103 | max_pages = npages; |
1104 | } |
1105 | |
	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		kfree(lgp);
		return NULL;
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
1112 | lgp->res.layoutp = &lgp->args.layout; |
1113 | |
1114 | /* Don't confuse uninitialised result and success */ |
1115 | lgp->res.status = -NFS4ERR_DELAY; |
1116 | |
1117 | lgp->args.minlength = PAGE_SIZE; |
1118 | if (lgp->args.minlength > range->length) |
1119 | lgp->args.minlength = range->length; |
1120 | if (ino) { |
		loff_t i_size = i_size_read(ino);
1122 | |
1123 | if (range->iomode == IOMODE_READ) { |
1124 | if (range->offset >= i_size) |
1125 | lgp->args.minlength = 0; |
1126 | else if (i_size - range->offset < lgp->args.minlength) |
1127 | lgp->args.minlength = i_size - range->offset; |
1128 | } |
1129 | } |
1130 | lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; |
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
1136 | lgp->gfp_flags = gfp_flags; |
1137 | lgp->cred = ctx->cred; |
1138 | return lgp; |
1139 | } |
1140 | |
1141 | void pnfs_layoutget_free(struct nfs4_layoutget *lgp) |
1142 | { |
1143 | size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE; |
1144 | |
	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	pnfs_put_layout_hdr(lgp->lo);
	put_nfs_open_context(lgp->args.ctx);
	kfree(lgp);
1149 | } |
1150 | |
1151 | static void pnfs_clear_layoutcommit(struct inode *inode, |
1152 | struct list_head *head) |
1153 | { |
1154 | struct nfs_inode *nfsi = NFS_I(inode); |
1155 | struct pnfs_layout_segment *lseg, *tmp; |
1156 | |
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
1163 | } |
1164 | } |
1165 | |
1166 | void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, |
1167 | const nfs4_stateid *arg_stateid, |
1168 | const struct pnfs_layout_range *range, |
1169 | const nfs4_stateid *stateid) |
1170 | { |
1171 | struct inode *inode = lo->plh_inode; |
1172 | LIST_HEAD(freeme); |
1173 | |
	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo) ||
	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
		goto out_unlock;
	if (stateid) {
		u32 seq = be32_to_cpu(arg_stateid->seqid);

		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
		pnfs_set_layout_stateid(lo, stateid, NULL, true);
	} else
		pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
	pnfs_clear_layoutreturn_waitbit(lo);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&freeme);
}
1192 | |
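/*
 * Prepare a layoutreturn: grab the RETURN_LOCK, take a reference on the
 * layout header, and snapshot the stateid, cred and iomode to return.
 * Returns false if a layoutget or another layoutreturn is in flight.
 */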
1193 | static bool |
1194 | pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo, |
1195 | nfs4_stateid *stateid, |
1196 | const struct cred **cred, |
1197 | enum pnfs_iomode *iomode) |
1198 | { |
1199 | /* Serialise LAYOUTGET/LAYOUTRETURN */ |
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		return false;
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	pnfs_get_layout_hdr(lo);
	nfs4_stateid_copy(stateid, &lo->plh_stateid);
	*cred = get_cred(lo->plh_lc_cred);
1208 | if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) { |
1209 | if (lo->plh_return_seq != 0) |
1210 | stateid->seqid = cpu_to_be32(lo->plh_return_seq); |
1211 | if (iomode != NULL) |
1212 | *iomode = lo->plh_return_iomode; |
1213 | pnfs_clear_layoutreturn_info(lo); |
1214 | } else if (iomode != NULL) |
1215 | *iomode = IOMODE_ANY; |
1216 | pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid)); |
1217 | return true; |
1218 | } |
1219 | |
1220 | static void |
1221 | pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args, |
1222 | struct pnfs_layout_hdr *lo, |
1223 | const nfs4_stateid *stateid, |
1224 | enum pnfs_iomode iomode) |
1225 | { |
1226 | struct inode *inode = lo->plh_inode; |
1227 | |
1228 | args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id; |
1229 | args->inode = inode; |
1230 | args->range.iomode = iomode; |
1231 | args->range.offset = 0; |
1232 | args->range.length = NFS4_MAX_UINT64; |
1233 | args->layout = lo; |
	nfs4_stateid_copy(&args->stateid, stateid);
1235 | } |
1236 | |
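/*
 * Allocate and send a LAYOUTRETURN RPC. On allocation failure, the
 * return waitbit is cleared and the references taken by
 * pnfs_prepare_layoutreturn() are dropped before returning -ENOMEM.
 */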
1237 | static int |
1238 | pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, |
1239 | const nfs4_stateid *stateid, |
1240 | const struct cred **pcred, |
1241 | enum pnfs_iomode iomode, |
1242 | bool sync) |
1243 | { |
	struct inode *ino = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	struct nfs4_layoutreturn *lrp;
	const struct cred *cred = *pcred;
	int status = 0;

	*pcred = NULL;
	lrp = kzalloc(sizeof(*lrp), nfs_io_gfp_mask());
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		put_cred(cred);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
	lrp->args.ld_private = &lrp->ld_private;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = cred;
	if (ld->prepare_layoutreturn)
		ld->prepare_layoutreturn(&lrp->args);

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
1272 | return status; |
1273 | } |
1274 | |
1275 | static bool |
1276 | pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo, |
1277 | enum pnfs_iomode iomode, |
1278 | u32 seq) |
1279 | { |
1280 | struct pnfs_layout_range recall_range = { |
1281 | .length = NFS4_MAX_UINT64, |
1282 | .iomode = iomode, |
1283 | }; |
	return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
					       &recall_range, seq) != -EBUSY;
1286 | } |
1287 | |
1288 | /* Return true if layoutreturn is needed */ |
1289 | static bool |
1290 | pnfs_layout_need_return(struct pnfs_layout_hdr *lo) |
1291 | { |
1292 | if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) |
1293 | return false; |
	return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
					       lo->plh_return_seq);
1296 | } |
1297 | |
1298 | static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) |
1299 | { |
	struct inode *inode = lo->plh_inode;
1301 | |
1302 | if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) |
1303 | return; |
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		const struct cred *cred;
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
1319 | } |
1320 | |
1321 | /* |
1322 | * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr |
1323 | * when the layout segment list is empty. |
1324 | * |
1325 | * Note that a pnfs_layout_hdr can exist with an empty layout segment |
1326 | * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the |
1327 | * deviceid is marked invalid. |
1328 | */ |
1329 | int |
1330 | _pnfs_return_layout(struct inode *ino) |
1331 | { |
1332 | struct pnfs_layout_hdr *lo = NULL; |
	struct nfs_inode *nfsi = NFS_I(ino);
1334 | struct pnfs_layout_range range = { |
1335 | .iomode = IOMODE_ANY, |
1336 | .offset = 0, |
1337 | .length = NFS4_MAX_UINT64, |
1338 | }; |
1339 | LIST_HEAD(tmp_list); |
1340 | const struct cred *cred; |
1341 | nfs4_stateid stateid; |
1342 | int status = 0; |
1343 | bool send, valid_layout; |
1344 | |
1345 | dprintk("NFS: %s for inode %lu\n" , __func__, ino->i_ino); |
1346 | |
1347 | spin_lock(lock: &ino->i_lock); |
1348 | lo = nfsi->layout; |
1349 | if (!lo) { |
1350 | spin_unlock(lock: &ino->i_lock); |
1351 | dprintk("NFS: %s no layout to return\n" , __func__); |
1352 | goto out; |
1353 | } |
1354 | /* Reference matched in nfs4_layoutreturn_release */ |
1355 | pnfs_get_layout_hdr(lo); |
1356 | /* Is there an outstanding layoutreturn ? */ |
1357 | if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) { |
1358 | spin_unlock(lock: &ino->i_lock); |
1359 | if (wait_on_bit(word: &lo->plh_flags, bit: NFS_LAYOUT_RETURN, |
1360 | TASK_UNINTERRUPTIBLE)) |
1361 | goto out_put_layout_hdr; |
1362 | spin_lock(lock: &ino->i_lock); |
1363 | } |
1364 | valid_layout = pnfs_layout_is_valid(lo); |
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
1370 | |
1371 | /* Don't send a LAYOUTRETURN if list was initially empty */ |
1372 | if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) || |
1373 | !valid_layout) { |
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
1376 | goto out_wait_layoutreturn; |
1377 | } |
1378 | |
	send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
	spin_unlock(&ino->i_lock);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
out_wait_layoutreturn:
	wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
out_put_layout_hdr:
	pnfs_free_lseg_list(&tmp_list);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
1390 | return status; |
1391 | } |
1392 | |
1393 | int |
1394 | pnfs_commit_and_return_layout(struct inode *inode) |
1395 | { |
1396 | struct pnfs_layout_hdr *lo; |
1397 | int ret; |
1398 | |
	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
1416 | pnfs_put_layout_hdr(lo); |
1417 | return ret; |
1418 | } |
1419 | |
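/*
 * Return-on-close: if no delegation or open file state pins the layout,
 * mark its segments for return and try to piggyback the LAYOUTRETURN on
 * the CLOSE compound. Returns true and fills in @args on success;
 * otherwise a standalone LAYOUTRETURN may be sent from here instead.
 */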
1420 | bool pnfs_roc(struct inode *ino, |
1421 | struct nfs4_layoutreturn_args *args, |
1422 | struct nfs4_layoutreturn_res *res, |
1423 | const struct cred *cred) |
1424 | { |
	struct nfs_inode *nfsi = NFS_I(ino);
1426 | struct nfs_open_context *ctx; |
1427 | struct nfs4_state *state; |
1428 | struct pnfs_layout_hdr *lo; |
1429 | struct pnfs_layout_segment *lseg, *next; |
1430 | const struct cred *lc_cred; |
1431 | nfs4_stateid stateid; |
1432 | enum pnfs_iomode iomode = 0; |
1433 | bool layoutreturn = false, roc = false; |
1434 | bool skip_read = false; |
1435 | |
	if (!nfs_have_layout(ino))
1437 | return false; |
1438 | retry: |
1439 | rcu_read_lock(); |
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || !pnfs_layout_is_valid(lo) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		lo = NULL;
		goto out_noroc;
	}
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		rcu_read_unlock();
		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
				TASK_UNINTERRUPTIBLE);
		pnfs_put_layout_hdr(lo);
		goto retry;
	}

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ)) {
		if (nfs4_check_delegation(ino, FMODE_WRITE))
			goto out_noroc;
		skip_read = true;
	}
1463 | |
1464 | list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { |
1465 | state = ctx->state; |
1466 | if (state == NULL) |
1467 | continue; |
1468 | /* Don't return layout if there is open file state */ |
1469 | if (state->state & FMODE_WRITE) |
1470 | goto out_noroc; |
1471 | if (state->state & FMODE_READ) |
1472 | skip_read = true; |
1473 | } |
1474 | |
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
		if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
			continue;
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
			continue;
1482 | /* |
1483 | * Note: mark lseg for return so pnfs_layout_remove_lseg |
1484 | * doesn't invalidate the layout for us. |
1485 | */ |
		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
1490 | } |
1491 | |
1492 | if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) |
1493 | goto out_noroc; |
1494 | |
1495 | /* ROC in two conditions: |
1496 | * 1. there are ROC lsegs |
1497 | * 2. we don't send layoutreturn |
1498 | */ |
	/* lo ref dropped in pnfs_roc_release() */
	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
	/* If the creds don't match, we can't compound the layoutreturn */
	if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
		goto out_noroc;

	roc = layoutreturn;
	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
	res->lrs_present = 0;
	layoutreturn = false;
	put_cred(lc_cred);

out_noroc:
	spin_unlock(&ino->i_lock);
	rcu_read_unlock();
	pnfs_layoutcommit_inode(ino, true);
	if (roc) {
		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
		if (ld->prepare_layoutreturn)
			ld->prepare_layoutreturn(args);
		pnfs_put_layout_hdr(lo);
		return true;
	}
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
	pnfs_put_layout_hdr(lo);
1525 | return false; |
1526 | } |
1527 | |
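/*
 * Process the LAYOUTRETURN result from a return-on-close compound.
 * Returns 0 once the result has been fully handled, or -EAGAIN if the
 * layoutreturn should be retried with a refreshed layout stateid.
 */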
1528 | int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp, |
1529 | struct nfs4_layoutreturn_res **respp, int *ret) |
1530 | { |
1531 | struct nfs4_layoutreturn_args *arg = *argpp; |
1532 | int retval = -EAGAIN; |
1533 | |
1534 | if (!arg) |
1535 | return 0; |
1536 | /* Handle Layoutreturn errors */ |
1537 | switch (*ret) { |
1538 | case 0: |
1539 | retval = 0; |
1540 | break; |
1541 | case -NFS4ERR_NOMATCHING_LAYOUT: |
1542 | /* Was there an RPC level error? If not, retry */ |
1543 | if (task->tk_rpc_status == 0) |
1544 | break; |
1545 | /* If the call was not sent, let caller handle it */ |
1546 | if (!RPC_WAS_SENT(task)) |
1547 | return 0; |
1548 | /* |
1549 | * Otherwise, assume the call succeeded and |
1550 | * that we need to release the layout |
1551 | */ |
1552 | *ret = 0; |
1553 | (*respp)->lrs_present = 0; |
1554 | retval = 0; |
1555 | break; |
1556 | case -NFS4ERR_DELAY: |
1557 | /* Let the caller handle the retry */ |
1558 | *ret = -NFS4ERR_NOMATCHING_LAYOUT; |
1559 | return 0; |
1560 | case -NFS4ERR_OLD_STATEID: |
		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
					&arg->range, arg->inode))
1563 | break; |
1564 | *ret = -NFS4ERR_NOMATCHING_LAYOUT; |
1565 | return -EAGAIN; |
1566 | } |
1567 | *argpp = NULL; |
1568 | *respp = NULL; |
1569 | return retval; |
1570 | } |
1571 | |
1572 | void pnfs_roc_release(struct nfs4_layoutreturn_args *args, |
1573 | struct nfs4_layoutreturn_res *res, |
1574 | int ret) |
1575 | { |
1576 | struct pnfs_layout_hdr *lo = args->layout; |
1577 | struct inode *inode = args->inode; |
1578 | const nfs4_stateid *res_stateid = NULL; |
1579 | struct nfs4_xdr_opaque_data *ld_private = args->ld_private; |
1580 | |
1581 | switch (ret) { |
1582 | case -NFS4ERR_NOMATCHING_LAYOUT: |
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
			pnfs_set_plh_return_info(lo, args->range.iomode, 0);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&inode->i_lock);
		break;
	case 0:
		if (res->lrs_present)
			res_stateid = &res->stateid;
		fallthrough;
	default:
		pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
					     res_stateid);
	}
	trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
1599 | if (ld_private && ld_private->ops && ld_private->ops->free) |
1600 | ld_private->ops->free(ld_private); |
1601 | pnfs_put_layout_hdr(lo); |
1602 | } |
1603 | |
1604 | bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) |
1605 | { |
	struct nfs_inode *nfsi = NFS_I(ino);
1607 | struct pnfs_layout_hdr *lo; |
1608 | bool sleep = false; |
1609 | |
	/* We might not hold a reference on lo, so we need to check it
	 * under the i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		sleep = true;
	}
	spin_unlock(&ino->i_lock);
1619 | return sleep; |
1620 | } |
1621 | |
1622 | /* |
1623 | * Compare two layout segments for sorting into layout cache. |
1624 | * We want to preferentially return RW over RO layouts, so ensure those |
1625 | * are seen first. |
1626 | */ |
1627 | static s64 |
1628 | pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1, |
1629 | const struct pnfs_layout_range *l2) |
1630 | { |
1631 | s64 d; |
1632 | |
1633 | /* high offset > low offset */ |
1634 | d = l1->offset - l2->offset; |
1635 | if (d) |
1636 | return d; |
1637 | |
1638 | /* short length > long length */ |
1639 | d = l2->length - l1->length; |
1640 | if (d) |
1641 | return d; |
1642 | |
1643 | /* read > read/write */ |
1644 | return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ); |
1645 | } |
1646 | |
1647 | static bool |
1648 | pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1, |
1649 | const struct pnfs_layout_range *l2) |
1650 | { |
1651 | return pnfs_lseg_range_cmp(l1, l2) > 0; |
1652 | } |
1653 | |
1654 | static bool |
1655 | pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg, |
1656 | struct pnfs_layout_segment *old) |
1657 | { |
1658 | return false; |
1659 | } |
1660 | |
1661 | void |
1662 | pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo, |
1663 | struct pnfs_layout_segment *lseg, |
1664 | bool (*is_after)(const struct pnfs_layout_range *, |
1665 | const struct pnfs_layout_range *), |
1666 | bool (*do_merge)(struct pnfs_layout_segment *, |
1667 | struct pnfs_layout_segment *), |
1668 | struct list_head *free_me) |
1669 | { |
1670 | struct pnfs_layout_segment *lp, *tmp; |
1671 | |
1672 | dprintk("%s:Begin\n" , __func__); |
1673 | |
1674 | list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) { |
1675 | if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0) |
1676 | continue; |
1677 | if (do_merge(lseg, lp)) { |
1678 | mark_lseg_invalid(lseg: lp, tmp_list: free_me); |
1679 | continue; |
1680 | } |
1681 | if (is_after(&lseg->pls_range, &lp->pls_range)) |
1682 | continue; |
1683 | list_add_tail(new: &lseg->pls_list, head: &lp->pls_list); |
1684 | dprintk("%s: inserted lseg %p " |
1685 | "iomode %d offset %llu length %llu before " |
1686 | "lp %p iomode %d offset %llu length %llu\n" , |
1687 | __func__, lseg, lseg->pls_range.iomode, |
1688 | lseg->pls_range.offset, lseg->pls_range.length, |
1689 | lp, lp->pls_range.iomode, lp->pls_range.offset, |
1690 | lp->pls_range.length); |
1691 | goto out; |
1692 | } |
1693 | list_add_tail(new: &lseg->pls_list, head: &lo->plh_segs); |
1694 | dprintk("%s: inserted lseg %p " |
1695 | "iomode %d offset %llu length %llu at tail\n" , |
1696 | __func__, lseg, lseg->pls_range.iomode, |
1697 | lseg->pls_range.offset, lseg->pls_range.length); |
1698 | out: |
1699 | pnfs_get_layout_hdr(lo); |
1700 | |
1701 | dprintk("%s:Return\n" , __func__); |
1702 | } |
1703 | EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg); |
1704 | |
1705 | static void |
1706 | pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo, |
1707 | struct pnfs_layout_segment *lseg, |
1708 | struct list_head *free_me) |
1709 | { |
1710 | struct inode *inode = lo->plh_inode; |
1711 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; |
1712 | |
1713 | if (ld->add_lseg != NULL) |
1714 | ld->add_lseg(lo, lseg, free_me); |
1715 | else |
1716 | pnfs_generic_layout_insert_lseg(lo, lseg, |
1717 | pnfs_lseg_range_is_after, |
1718 | pnfs_lseg_no_merge, |
1719 | free_me); |
1720 | } |
1721 | |
1722 | static struct pnfs_layout_hdr * |
1723 | alloc_init_layout_hdr(struct inode *ino, |
1724 | struct nfs_open_context *ctx, |
1725 | gfp_t gfp_flags) |
1726 | { |
1727 | struct pnfs_layout_hdr *lo; |
1728 | |
1729 | lo = pnfs_alloc_layout_hdr(ino, gfp_flags); |
1730 | if (!lo) |
1731 | return NULL; |
1732 | refcount_set(r: &lo->plh_refcount, n: 1); |
1733 | INIT_LIST_HEAD(list: &lo->plh_layouts); |
1734 | INIT_LIST_HEAD(list: &lo->plh_segs); |
1735 | INIT_LIST_HEAD(list: &lo->plh_return_segs); |
1736 | INIT_LIST_HEAD(list: &lo->plh_bulk_destroy); |
1737 | lo->plh_inode = ino; |
1738 | lo->plh_lc_cred = get_cred(cred: ctx->cred); |
1739 | lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID; |
1740 | return lo; |
1741 | } |
1742 | |
1743 | static struct pnfs_layout_hdr * |
1744 | pnfs_find_alloc_layout(struct inode *ino, |
1745 | struct nfs_open_context *ctx, |
1746 | gfp_t gfp_flags) |
1747 | __releases(&ino->i_lock) |
1748 | __acquires(&ino->i_lock) |
1749 | { |
1750 | struct nfs_inode *nfsi = NFS_I(inode: ino); |
1751 | struct pnfs_layout_hdr *new = NULL; |
1752 | |
1753 | dprintk("%s Begin ino=%p layout=%p\n" , __func__, ino, nfsi->layout); |
1754 | |
1755 | if (nfsi->layout != NULL) |
1756 | goto out_existing; |
1757 | spin_unlock(lock: &ino->i_lock); |
1758 | new = alloc_init_layout_hdr(ino, ctx, gfp_flags); |
1759 | spin_lock(lock: &ino->i_lock); |
1760 | |
1761 | if (likely(nfsi->layout == NULL)) { /* Won the race? */ |
1762 | nfsi->layout = new; |
1763 | return new; |
1764 | } else if (new != NULL) |
1765 | pnfs_free_layout_hdr(lo: new); |
1766 | out_existing: |
1767 | pnfs_get_layout_hdr(lo: nfsi->layout); |
1768 | return nfsi->layout; |
1769 | } |
1770 | |
1771 | /* |
1772 | * iomode matching rules: |
1773 | * iomode lseg strict match |
1774 | * iomode |
1775 | * ----- ----- ------ ----- |
1776 | * ANY READ N/A true |
1777 | * ANY RW N/A true |
1778 | * RW READ N/A false |
1779 | * RW RW N/A true |
1780 | * READ READ N/A true |
1781 | * READ RW true false |
1782 | * READ RW false true |
1783 | */ |
1784 | static bool |
1785 | pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range, |
1786 | const struct pnfs_layout_range *range, |
1787 | bool strict_iomode) |
1788 | { |
1789 | struct pnfs_layout_range range1; |
1790 | |
1791 | if ((range->iomode == IOMODE_RW && |
1792 | ls_range->iomode != IOMODE_RW) || |
1793 | (range->iomode != ls_range->iomode && |
1794 | strict_iomode) || |
1795 | !pnfs_lseg_range_intersecting(l1: ls_range, l2: range)) |
1796 | return false; |
1797 | |
1798 | /* range1 covers only the first byte in the range */ |
1799 | range1 = *range; |
1800 | range1.length = 1; |
1801 | return pnfs_lseg_range_contained(l1: ls_range, l2: &range1); |
1802 | } |
1803 | |
1804 | /* |
1805 | * lookup range in layout |
1806 | */ |
1807 | static struct pnfs_layout_segment * |
1808 | pnfs_find_lseg(struct pnfs_layout_hdr *lo, |
1809 | struct pnfs_layout_range *range, |
1810 | bool strict_iomode) |
1811 | { |
1812 | struct pnfs_layout_segment *lseg, *ret = NULL; |
1813 | |
1814 | dprintk("%s:Begin\n" , __func__); |
1815 | |
1816 | list_for_each_entry(lseg, &lo->plh_segs, pls_list) { |
1817 | if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) && |
1818 | pnfs_lseg_range_match(ls_range: &lseg->pls_range, range, |
1819 | strict_iomode)) { |
1820 | ret = pnfs_get_lseg(lseg); |
1821 | break; |
1822 | } |
1823 | } |
1824 | |
1825 | dprintk("%s:Return lseg %p ref %d\n" , |
1826 | __func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0); |
1827 | return ret; |
1828 | } |
1829 | |
1830 | /* |
1831 | * Use mdsthreshold hints set at each OPEN to determine if I/O should go |
1832 | * to the MDS or over pNFS |
1833 | * |
1834 | * The nfs_inode read_io and write_io fields are cumulative counters reset |
1835 | * when there are no layout segments. Note that in pnfs_update_layout iomode |
1836 | * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a |
1837 | * WRITE request. |
1838 | * |
1839 | * A return of true means use MDS I/O. |
1840 | * |
1841 | * From rfc 5661: |
1842 | * If a file's size is smaller than the file size threshold, data accesses |
1843 | * SHOULD be sent to the metadata server. If an I/O request has a length that |
1844 | * is below the I/O size threshold, the I/O SHOULD be sent to the metadata |
1845 | * server. If both file size and I/O size are provided, the client SHOULD |
1846 | * reach or exceed both thresholds before sending its read or write |
1847 | * requests to the data server. |
1848 | */ |
1849 | static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, |
1850 | struct inode *ino, int iomode) |
1851 | { |
1852 | struct nfs4_threshold *t = ctx->mdsthreshold; |
1853 | struct nfs_inode *nfsi = NFS_I(inode: ino); |
1854 | loff_t fsize = i_size_read(inode: ino); |
1855 | bool size = false, size_set = false, io = false, io_set = false, ret = false; |
1856 | |
1857 | if (t == NULL) |
1858 | return ret; |
1859 | |
1860 | dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n" , |
1861 | __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz); |
1862 | |
1863 | switch (iomode) { |
1864 | case IOMODE_READ: |
1865 | if (t->bm & THRESHOLD_RD) { |
1866 | dprintk("%s fsize %llu\n" , __func__, fsize); |
1867 | size_set = true; |
1868 | if (fsize < t->rd_sz) |
1869 | size = true; |
1870 | } |
1871 | if (t->bm & THRESHOLD_RD_IO) { |
1872 | dprintk("%s nfsi->read_io %llu\n" , __func__, |
1873 | nfsi->read_io); |
1874 | io_set = true; |
1875 | if (nfsi->read_io < t->rd_io_sz) |
1876 | io = true; |
1877 | } |
1878 | break; |
1879 | case IOMODE_RW: |
1880 | if (t->bm & THRESHOLD_WR) { |
1881 | dprintk("%s fsize %llu\n" , __func__, fsize); |
1882 | size_set = true; |
1883 | if (fsize < t->wr_sz) |
1884 | size = true; |
1885 | } |
1886 | if (t->bm & THRESHOLD_WR_IO) { |
1887 | dprintk("%s nfsi->write_io %llu\n" , __func__, |
1888 | nfsi->write_io); |
1889 | io_set = true; |
1890 | if (nfsi->write_io < t->wr_io_sz) |
1891 | io = true; |
1892 | } |
1893 | break; |
1894 | } |
1895 | if (size_set && io_set) { |
1896 | if (size && io) |
1897 | ret = true; |
1898 | } else if (size || io) |
1899 | ret = true; |
1900 | |
1901 | dprintk("<-- %s size %d io %d ret %d\n" , __func__, size, io, ret); |
1902 | return ret; |
1903 | } |
1904 | |
1905 | static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) |
1906 | { |
1907 | /* |
1908 | * send layoutcommit as it can hold up layoutreturn due to lseg |
1909 | * reference |
1910 | */ |
1911 | pnfs_layoutcommit_inode(inode: lo->plh_inode, sync: false); |
1912 | return wait_on_bit_action(word: &lo->plh_flags, bit: NFS_LAYOUT_RETURN, |
1913 | action: nfs_wait_bit_killable, |
1914 | TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); |
1915 | } |
1916 | |
1917 | static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) |
1918 | { |
1919 | atomic_inc(v: &lo->plh_outstanding); |
1920 | } |
1921 | |
1922 | static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) |
1923 | { |
1924 | if (atomic_dec_and_test(v: &lo->plh_outstanding) && |
1925 | test_and_clear_bit(nr: NFS_LAYOUT_DRAIN, addr: &lo->plh_flags)) |
1926 | wake_up_bit(word: &lo->plh_flags, bit: NFS_LAYOUT_DRAIN); |
1927 | } |
1928 | |
1929 | static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo) |
1930 | { |
1931 | return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags); |
1932 | } |
1933 | |
1934 | static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo) |
1935 | { |
1936 | unsigned long *bitlock = &lo->plh_flags; |
1937 | |
1938 | clear_bit_unlock(nr: NFS_LAYOUT_FIRST_LAYOUTGET, addr: bitlock); |
1939 | smp_mb__after_atomic(); |
1940 | wake_up_bit(word: bitlock, bit: NFS_LAYOUT_FIRST_LAYOUTGET); |
1941 | } |
1942 | |
1943 | static void _add_to_server_list(struct pnfs_layout_hdr *lo, |
1944 | struct nfs_server *server) |
1945 | { |
1946 | if (!test_and_set_bit(nr: NFS_LAYOUT_HASHED, addr: &lo->plh_flags)) { |
1947 | struct nfs_client *clp = server->nfs_client; |
1948 | |
1949 | /* The lo must be on the clp list if there is any |
1950 | * chance of a CB_LAYOUTRECALL(FILE) coming in. |
1951 | */ |
1952 | spin_lock(lock: &clp->cl_lock); |
1953 | list_add_tail_rcu(new: &lo->plh_layouts, head: &server->layouts); |
1954 | spin_unlock(lock: &clp->cl_lock); |
1955 | } |
1956 | } |
1957 | |
1958 | /* |
1959 | * Layout segment is retreived from the server if not cached. |
1960 | * The appropriate layout segment is referenced and returned to the caller. |
1961 | */ |
1962 | struct pnfs_layout_segment * |
1963 | pnfs_update_layout(struct inode *ino, |
1964 | struct nfs_open_context *ctx, |
1965 | loff_t pos, |
1966 | u64 count, |
1967 | enum pnfs_iomode iomode, |
1968 | bool strict_iomode, |
1969 | gfp_t gfp_flags) |
1970 | { |
1971 | struct pnfs_layout_range arg = { |
1972 | .iomode = iomode, |
1973 | .offset = pos, |
1974 | .length = count, |
1975 | }; |
1976 | unsigned pg_offset; |
1977 | struct nfs_server *server = NFS_SERVER(inode: ino); |
1978 | struct nfs_client *clp = server->nfs_client; |
1979 | struct pnfs_layout_hdr *lo = NULL; |
1980 | struct pnfs_layout_segment *lseg = NULL; |
1981 | struct nfs4_layoutget *lgp; |
1982 | nfs4_stateid stateid; |
1983 | struct nfs4_exception exception = { |
1984 | .inode = ino, |
1985 | }; |
1986 | unsigned long giveup = jiffies + (clp->cl_lease_time << 1); |
1987 | bool first; |
1988 | |
1989 | if (!pnfs_enabled_sb(nfss: NFS_SERVER(inode: ino))) { |
1990 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
1991 | reason: PNFS_UPDATE_LAYOUT_NO_PNFS); |
1992 | goto out; |
1993 | } |
1994 | |
1995 | if (pnfs_within_mdsthreshold(ctx, ino, iomode)) { |
1996 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
1997 | reason: PNFS_UPDATE_LAYOUT_MDSTHRESH); |
1998 | goto out; |
1999 | } |
2000 | |
2001 | lookup_again: |
2002 | lseg = ERR_PTR(error: nfs4_client_recover_expired_lease(clp)); |
2003 | if (IS_ERR(ptr: lseg)) |
2004 | goto out; |
2005 | first = false; |
2006 | spin_lock(lock: &ino->i_lock); |
2007 | lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); |
2008 | if (lo == NULL) { |
2009 | spin_unlock(lock: &ino->i_lock); |
2010 | lseg = ERR_PTR(error: -ENOMEM); |
2011 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2012 | reason: PNFS_UPDATE_LAYOUT_NOMEM); |
2013 | goto out; |
2014 | } |
2015 | |
2016 | /* Do we even need to bother with this? */ |
2017 | if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { |
2018 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2019 | reason: PNFS_UPDATE_LAYOUT_BULK_RECALL); |
2020 | dprintk("%s matches recall, use MDS\n" , __func__); |
2021 | goto out_unlock; |
2022 | } |
2023 | |
/* If LAYOUTGET already failed once, we don't try again */
2025 | if (pnfs_layout_io_test_failed(lo, iomode)) { |
2026 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2027 | reason: PNFS_UPDATE_LAYOUT_IO_TEST_FAIL); |
2028 | goto out_unlock; |
2029 | } |
2030 | |
2031 | /* |
2032 | * If the layout segment list is empty, but there are outstanding |
2033 | * layoutget calls, then they might be subject to a layoutrecall. |
2034 | */ |
2035 | if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) && |
2036 | atomic_read(v: &lo->plh_outstanding) != 0) { |
2037 | spin_unlock(lock: &ino->i_lock); |
2038 | lseg = ERR_PTR(error: wait_on_bit(word: &lo->plh_flags, bit: NFS_LAYOUT_DRAIN, |
2039 | TASK_KILLABLE)); |
2040 | if (IS_ERR(ptr: lseg)) |
2041 | goto out_put_layout_hdr; |
2042 | pnfs_put_layout_hdr(lo); |
2043 | goto lookup_again; |
2044 | } |
2045 | |
2046 | /* |
2047 | * Because we free lsegs when sending LAYOUTRETURN, we need to wait |
2048 | * for LAYOUTRETURN. |
2049 | */ |
2050 | if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { |
2051 | spin_unlock(lock: &ino->i_lock); |
2052 | dprintk("%s wait for layoutreturn\n" , __func__); |
2053 | lseg = ERR_PTR(error: pnfs_prepare_to_retry_layoutget(lo)); |
2054 | if (!IS_ERR(ptr: lseg)) { |
2055 | pnfs_put_layout_hdr(lo); |
2056 | dprintk("%s retrying\n" , __func__); |
2057 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, |
2058 | lseg, |
2059 | reason: PNFS_UPDATE_LAYOUT_RETRY); |
2060 | goto lookup_again; |
2061 | } |
2062 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2063 | reason: PNFS_UPDATE_LAYOUT_RETURN); |
2064 | goto out_put_layout_hdr; |
2065 | } |
2066 | |
2067 | lseg = pnfs_find_lseg(lo, range: &arg, strict_iomode); |
2068 | if (lseg) { |
2069 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2070 | reason: PNFS_UPDATE_LAYOUT_FOUND_CACHED); |
2071 | goto out_unlock; |
2072 | } |
2073 | |
2074 | /* |
2075 | * Choose a stateid for the LAYOUTGET. If we don't have a layout |
2076 | * stateid, or it has been invalidated, then we must use the open |
2077 | * stateid. |
2078 | */ |
2079 | if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) { |
2080 | int status; |
2081 | |
2082 | /* |
2083 | * The first layoutget for the file. Need to serialize per |
2084 | * RFC 5661 Errata 3208. |
2085 | */ |
2086 | if (test_and_set_bit(nr: NFS_LAYOUT_FIRST_LAYOUTGET, |
2087 | addr: &lo->plh_flags)) { |
2088 | spin_unlock(lock: &ino->i_lock); |
2089 | lseg = ERR_PTR(error: wait_on_bit(word: &lo->plh_flags, |
2090 | bit: NFS_LAYOUT_FIRST_LAYOUTGET, |
2091 | TASK_KILLABLE)); |
2092 | if (IS_ERR(ptr: lseg)) |
2093 | goto out_put_layout_hdr; |
2094 | pnfs_put_layout_hdr(lo); |
2095 | dprintk("%s retrying\n" , __func__); |
2096 | goto lookup_again; |
2097 | } |
2098 | |
2099 | spin_unlock(lock: &ino->i_lock); |
2100 | first = true; |
2101 | status = nfs4_select_rw_stateid(ctx->state, |
2102 | iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ, |
2103 | NULL, &stateid, NULL); |
2104 | if (status != 0) { |
2105 | lseg = ERR_PTR(error: status); |
2106 | trace_pnfs_update_layout(inode: ino, pos, count, |
2107 | iomode, lo, lseg, |
2108 | reason: PNFS_UPDATE_LAYOUT_INVALID_OPEN); |
2109 | nfs4_schedule_stateid_recovery(server, ctx->state); |
2110 | pnfs_clear_first_layoutget(lo); |
2111 | pnfs_put_layout_hdr(lo); |
2112 | goto lookup_again; |
2113 | } |
2114 | spin_lock(lock: &ino->i_lock); |
2115 | } else { |
2116 | nfs4_stateid_copy(dst: &stateid, src: &lo->plh_stateid); |
2117 | } |
2118 | |
2119 | if (pnfs_layoutgets_blocked(lo)) { |
2120 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2121 | reason: PNFS_UPDATE_LAYOUT_BLOCKED); |
2122 | goto out_unlock; |
2123 | } |
2124 | nfs_layoutget_begin(lo); |
2125 | spin_unlock(lock: &ino->i_lock); |
2126 | |
2127 | _add_to_server_list(lo, server); |
2128 | |
2129 | pg_offset = arg.offset & ~PAGE_MASK; |
2130 | if (pg_offset) { |
2131 | arg.offset -= pg_offset; |
2132 | arg.length += pg_offset; |
2133 | } |
2134 | if (arg.length != NFS4_MAX_UINT64) |
2135 | arg.length = PAGE_ALIGN(arg.length); |
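/*
 * Example of the widening above: pos 5000, count 100 with 4KiB pages
 * gives pg_offset 904, so arg becomes {offset 4096, length 1004}, and
 * PAGE_ALIGN() then rounds the length up to 4096 so the LAYOUTGET
 * covers the whole page.
 */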
2136 | |
2137 | lgp = pnfs_alloc_init_layoutget_args(ino, ctx, stateid: &stateid, range: &arg, gfp_flags); |
2138 | if (!lgp) { |
2139 | lseg = ERR_PTR(error: -ENOMEM); |
2140 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, NULL, |
2141 | reason: PNFS_UPDATE_LAYOUT_NOMEM); |
2142 | nfs_layoutget_end(lo); |
2143 | goto out_put_layout_hdr; |
2144 | } |
2145 | |
2146 | lgp->lo = lo; |
2147 | pnfs_get_layout_hdr(lo); |
2148 | |
2149 | lseg = nfs4_proc_layoutget(lgp, exception: &exception); |
2150 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2151 | reason: PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET); |
2152 | nfs_layoutget_end(lo); |
2153 | if (IS_ERR(ptr: lseg)) { |
2154 | switch(PTR_ERR(ptr: lseg)) { |
2155 | case -EBUSY: |
2156 | if (time_after(jiffies, giveup)) |
2157 | lseg = NULL; |
2158 | break; |
2159 | case -ERECALLCONFLICT: |
2160 | case -EAGAIN: |
2161 | break; |
2162 | case -ENODATA: |
2163 | /* The server returned NFS4ERR_LAYOUTUNAVAILABLE */ |
2164 | pnfs_layout_set_fail_bit( |
2165 | lo, fail_bit: pnfs_iomode_to_fail_bit(iomode)); |
2166 | lseg = NULL; |
2167 | goto out_put_layout_hdr; |
2168 | default: |
2169 | if (!nfs_error_is_fatal(err: PTR_ERR(ptr: lseg))) { |
2170 | pnfs_layout_clear_fail_bit(lo, fail_bit: pnfs_iomode_to_fail_bit(iomode)); |
2171 | lseg = NULL; |
2172 | } |
2173 | goto out_put_layout_hdr; |
2174 | } |
2175 | if (lseg) { |
2176 | if (!exception.retry) |
2177 | goto out_put_layout_hdr; |
2178 | if (first) |
2179 | pnfs_clear_first_layoutget(lo); |
2180 | trace_pnfs_update_layout(inode: ino, pos, count, |
2181 | iomode, lo, lseg, reason: PNFS_UPDATE_LAYOUT_RETRY); |
2182 | pnfs_put_layout_hdr(lo); |
2183 | goto lookup_again; |
2184 | } |
2185 | } else { |
2186 | pnfs_layout_clear_fail_bit(lo, fail_bit: pnfs_iomode_to_fail_bit(iomode)); |
2187 | } |
2188 | |
2189 | out_put_layout_hdr: |
2190 | if (first) |
2191 | pnfs_clear_first_layoutget(lo); |
2192 | trace_pnfs_update_layout(inode: ino, pos, count, iomode, lo, lseg, |
2193 | reason: PNFS_UPDATE_LAYOUT_EXIT); |
2194 | pnfs_put_layout_hdr(lo); |
2195 | out: |
2196 | dprintk("%s: inode %s/%llu pNFS layout segment %s for " |
2197 | "(%s, offset: %llu, length: %llu)\n" , |
2198 | __func__, ino->i_sb->s_id, |
2199 | (unsigned long long)NFS_FILEID(ino), |
2200 | IS_ERR_OR_NULL(lseg) ? "not found" : "found" , |
2201 | iomode==IOMODE_RW ? "read/write" : "read-only" , |
2202 | (unsigned long long)pos, |
2203 | (unsigned long long)count); |
2204 | return lseg; |
2205 | out_unlock: |
2206 | spin_unlock(lock: &ino->i_lock); |
2207 | goto out_put_layout_hdr; |
2208 | } |
2209 | EXPORT_SYMBOL_GPL(pnfs_update_layout); |
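/*
 * Minimal caller sketch (hypothetical, for illustration only -- the
 * real in-tree callers are the pg_init helpers further down). It shows
 * the expected contract: this sketch simply falls back to the MDS for
 * both NULL and error returns, whereas the pg_init helpers below
 * propagate errors instead; a valid lseg must eventually be released
 * with pnfs_put_lseg().
 */
static inline struct pnfs_layout_segment *
example_get_read_lseg(struct inode *ino, struct nfs_open_context *ctx,
		      loff_t pos, u64 count)
{
	struct pnfs_layout_segment *lseg;

	lseg = pnfs_update_layout(ino, ctx, pos, count, IOMODE_READ,
				  false, GFP_KERNEL);
	if (IS_ERR_OR_NULL(lseg))
		return NULL;	/* no pNFS: read through the MDS instead */
	return lseg;		/* caller must pnfs_put_lseg() when done */
}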
2210 | |
2211 | static bool |
2212 | pnfs_sanity_check_layout_range(struct pnfs_layout_range *range) |
2213 | { |
2214 | switch (range->iomode) { |
2215 | case IOMODE_READ: |
2216 | case IOMODE_RW: |
2217 | break; |
2218 | default: |
2219 | return false; |
2220 | } |
2221 | if (range->offset == NFS4_MAX_UINT64) |
2222 | return false; |
2223 | if (range->length == 0) |
2224 | return false; |
2225 | if (range->length != NFS4_MAX_UINT64 && |
2226 | range->length > NFS4_MAX_UINT64 - range->offset) |
2227 | return false; |
2228 | return true; |
2229 | } |
2230 | |
2231 | static struct pnfs_layout_hdr * |
2232 | _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx) |
2233 | { |
2234 | struct pnfs_layout_hdr *lo; |
2235 | |
2236 | spin_lock(lock: &ino->i_lock); |
2237 | lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags: nfs_io_gfp_mask()); |
2238 | if (!lo) |
2239 | goto out_unlock; |
2240 | if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) |
2241 | goto out_unlock; |
2242 | if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) |
2243 | goto out_unlock; |
2244 | if (pnfs_layoutgets_blocked(lo)) |
2245 | goto out_unlock; |
2246 | if (test_and_set_bit(nr: NFS_LAYOUT_FIRST_LAYOUTGET, addr: &lo->plh_flags)) |
2247 | goto out_unlock; |
2248 | nfs_layoutget_begin(lo); |
2249 | spin_unlock(lock: &ino->i_lock); |
2250 | _add_to_server_list(lo, server: NFS_SERVER(inode: ino)); |
2251 | return lo; |
2252 | |
2253 | out_unlock: |
2254 | spin_unlock(lock: &ino->i_lock); |
2255 | pnfs_put_layout_hdr(lo); |
2256 | return NULL; |
2257 | } |
2258 | |
2259 | static void _lgopen_prepare_attached(struct nfs4_opendata *data, |
2260 | struct nfs_open_context *ctx) |
2261 | { |
2262 | struct inode *ino = data->dentry->d_inode; |
2263 | struct pnfs_layout_range rng = { |
2264 | .iomode = (data->o_arg.fmode & FMODE_WRITE) ? |
2265 | IOMODE_RW: IOMODE_READ, |
2266 | .offset = 0, |
2267 | .length = NFS4_MAX_UINT64, |
2268 | }; |
2269 | struct nfs4_layoutget *lgp; |
2270 | struct pnfs_layout_hdr *lo; |
2271 | |
2272 | /* Heuristic: don't send layoutget if we have cached data */ |
2273 | if (rng.iomode == IOMODE_READ && |
2274 | (i_size_read(inode: ino) == 0 || ino->i_mapping->nrpages != 0)) |
2275 | return; |
2276 | |
2277 | lo = _pnfs_grab_empty_layout(ino, ctx); |
2278 | if (!lo) |
2279 | return; |
2280 | lgp = pnfs_alloc_init_layoutget_args(ino, ctx, stateid: ¤t_stateid, range: &rng, |
2281 | gfp_flags: nfs_io_gfp_mask()); |
2282 | if (!lgp) { |
2283 | pnfs_clear_first_layoutget(lo); |
2284 | nfs_layoutget_end(lo); |
2285 | pnfs_put_layout_hdr(lo); |
2286 | return; |
2287 | } |
2288 | lgp->lo = lo; |
2289 | data->lgp = lgp; |
2290 | data->o_arg.lg_args = &lgp->args; |
2291 | data->o_res.lg_res = &lgp->res; |
2292 | } |
2293 | |
2294 | static void _lgopen_prepare_floating(struct nfs4_opendata *data, |
2295 | struct nfs_open_context *ctx) |
2296 | { |
2297 | struct inode *ino = data->dentry->d_inode; |
2298 | struct pnfs_layout_range rng = { |
2299 | .iomode = (data->o_arg.fmode & FMODE_WRITE) ? |
2300 | IOMODE_RW: IOMODE_READ, |
2301 | .offset = 0, |
2302 | .length = NFS4_MAX_UINT64, |
2303 | }; |
2304 | struct nfs4_layoutget *lgp; |
2305 | |
2306 | lgp = pnfs_alloc_init_layoutget_args(ino, ctx, stateid: ¤t_stateid, range: &rng, |
2307 | gfp_flags: nfs_io_gfp_mask()); |
2308 | if (!lgp) |
2309 | return; |
2310 | data->lgp = lgp; |
2311 | data->o_arg.lg_args = &lgp->args; |
2312 | data->o_res.lg_res = &lgp->res; |
2313 | } |
2314 | |
2315 | void pnfs_lgopen_prepare(struct nfs4_opendata *data, |
2316 | struct nfs_open_context *ctx) |
2317 | { |
2318 | struct nfs_server *server = NFS_SERVER(inode: data->dir->d_inode); |
2319 | |
2320 | if (!(pnfs_enabled_sb(nfss: server) && |
2321 | server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN)) |
2322 | return; |
2323 | /* Could check on max_ops, but currently hardcoded high enough */ |
2324 | if (!nfs_server_capable(inode: data->dir->d_inode, NFS_CAP_LGOPEN)) |
2325 | return; |
2326 | if (data->lgp) |
2327 | return; |
2328 | if (data->state) |
2329 | _lgopen_prepare_attached(data, ctx); |
2330 | else |
2331 | _lgopen_prepare_floating(data, ctx); |
2332 | } |
2333 | |
2334 | void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp, |
2335 | struct nfs_open_context *ctx) |
2336 | { |
2337 | struct pnfs_layout_hdr *lo; |
2338 | struct pnfs_layout_segment *lseg; |
2339 | struct nfs_server *srv = NFS_SERVER(inode: ino); |
2340 | u32 iomode; |
2341 | |
2342 | if (!lgp) |
2343 | return; |
2344 | dprintk("%s: entered with status %i\n" , __func__, lgp->res.status); |
2345 | if (lgp->res.status) { |
2346 | switch (lgp->res.status) { |
2347 | default: |
2348 | break; |
2349 | /* |
2350 | * Halt lgopen attempts if the server doesn't recognise |
2351 | * the "current stateid" value, the layout type, or the |
2352 | * layoutget operation as being valid. |
 * Also if it complains about too many ops in the compound
 * or about the request/reply being too big.
2355 | */ |
2356 | case -NFS4ERR_BAD_STATEID: |
2357 | case -NFS4ERR_NOTSUPP: |
2358 | case -NFS4ERR_REP_TOO_BIG: |
2359 | case -NFS4ERR_REP_TOO_BIG_TO_CACHE: |
2360 | case -NFS4ERR_REQ_TOO_BIG: |
2361 | case -NFS4ERR_TOO_MANY_OPS: |
2362 | case -NFS4ERR_UNKNOWN_LAYOUTTYPE: |
2363 | srv->caps &= ~NFS_CAP_LGOPEN; |
2364 | } |
2365 | return; |
2366 | } |
2367 | if (!lgp->lo) { |
2368 | lo = _pnfs_grab_empty_layout(ino, ctx); |
2369 | if (!lo) |
2370 | return; |
2371 | lgp->lo = lo; |
2372 | } else |
2373 | lo = lgp->lo; |
2374 | |
2375 | lseg = pnfs_layout_process(lgp); |
2376 | if (!IS_ERR(ptr: lseg)) { |
2377 | iomode = lgp->args.range.iomode; |
2378 | pnfs_layout_clear_fail_bit(lo, fail_bit: pnfs_iomode_to_fail_bit(iomode)); |
2379 | pnfs_put_lseg(lseg); |
2380 | } |
2381 | } |
2382 | |
2383 | void nfs4_lgopen_release(struct nfs4_layoutget *lgp) |
2384 | { |
2385 | if (lgp != NULL) { |
2386 | if (lgp->lo) { |
2387 | pnfs_clear_first_layoutget(lo: lgp->lo); |
2388 | nfs_layoutget_end(lo: lgp->lo); |
2389 | } |
2390 | pnfs_layoutget_free(lgp); |
2391 | } |
2392 | } |
2393 | |
2394 | struct pnfs_layout_segment * |
2395 | pnfs_layout_process(struct nfs4_layoutget *lgp) |
2396 | { |
2397 | struct pnfs_layout_hdr *lo = lgp->lo; |
2398 | struct nfs4_layoutget_res *res = &lgp->res; |
2399 | struct pnfs_layout_segment *lseg; |
2400 | struct inode *ino = lo->plh_inode; |
2401 | LIST_HEAD(free_me); |
2402 | |
2403 | if (!pnfs_sanity_check_layout_range(range: &res->range)) |
2404 | return ERR_PTR(error: -EINVAL); |
2405 | |
2406 | /* Inject layout blob into I/O device driver */ |
2407 | lseg = NFS_SERVER(inode: ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); |
2408 | if (IS_ERR_OR_NULL(ptr: lseg)) { |
2409 | if (!lseg) |
2410 | lseg = ERR_PTR(error: -ENOMEM); |
2411 | |
2412 | dprintk("%s: Could not allocate layout: error %ld\n" , |
2413 | __func__, PTR_ERR(lseg)); |
2414 | return lseg; |
2415 | } |
2416 | |
2417 | pnfs_init_lseg(lo, lseg, range: &res->range, stateid: &res->stateid); |
2418 | |
2419 | spin_lock(lock: &ino->i_lock); |
2420 | if (pnfs_layoutgets_blocked(lo)) { |
2421 | dprintk("%s forget reply due to state\n" , __func__); |
2422 | goto out_forget; |
2423 | } |
2424 | |
2425 | if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) && |
2426 | !pnfs_is_first_layoutget(lo)) |
2427 | goto out_forget; |
2428 | |
2429 | if (nfs4_stateid_match_other(dst: &lo->plh_stateid, src: &res->stateid)) { |
2430 | /* existing state ID, make sure the sequence number matches. */ |
2431 | if (pnfs_layout_stateid_blocked(lo, stateid: &res->stateid)) { |
2432 | if (!pnfs_layout_is_valid(lo)) |
2433 | lo->plh_barrier = 0; |
2434 | dprintk("%s forget reply due to sequence\n" , __func__); |
2435 | goto out_forget; |
2436 | } |
2437 | pnfs_set_layout_stateid(lo, new: &res->stateid, cred: lgp->cred, update_barrier: false); |
2438 | } else if (pnfs_layout_is_valid(lo)) { |
2439 | /* |
2440 | * We got an entirely new state ID. Mark all segments for the |
2441 | * inode invalid, and retry the layoutget |
2442 | */ |
2443 | struct pnfs_layout_range range = { |
2444 | .iomode = IOMODE_ANY, |
2445 | .length = NFS4_MAX_UINT64, |
2446 | }; |
2447 | pnfs_mark_matching_lsegs_return(lo, tmp_list: &free_me, recall_range: &range, seq: 0); |
2448 | goto out_forget; |
2449 | } else { |
2450 | /* We have a completely new layout */ |
2451 | pnfs_set_layout_stateid(lo, new: &res->stateid, cred: lgp->cred, update_barrier: true); |
2452 | } |
2453 | |
2454 | pnfs_get_lseg(lseg); |
pnfs_layout_insert_lseg(lo, lseg, free_me: &free_me);

2458 | if (res->return_on_close) |
2459 | set_bit(nr: NFS_LSEG_ROC, addr: &lseg->pls_flags); |
2460 | |
2461 | spin_unlock(lock: &ino->i_lock); |
2462 | pnfs_free_lseg_list(free_me: &free_me); |
2463 | return lseg; |
2464 | |
2465 | out_forget: |
2466 | spin_unlock(lock: &ino->i_lock); |
2467 | lseg->pls_layout = lo; |
2468 | NFS_SERVER(inode: ino)->pnfs_curr_ld->free_lseg(lseg); |
2469 | return ERR_PTR(error: -EAGAIN); |
2470 | } |
2471 | |
2472 | /** |
2473 | * pnfs_mark_matching_lsegs_return - Free or return matching layout segments |
2474 | * @lo: pointer to layout header |
2475 | * @tmp_list: list header to be used with pnfs_free_lseg_list() |
2476 | * @return_range: describe layout segment ranges to be returned |
2477 | * @seq: stateid seqid to match |
2478 | * |
2479 | * This function is mainly intended for use by layoutrecall. It attempts |
2480 | * to free the layout segment immediately, or else to mark it for return |
2481 | * as soon as its reference count drops to zero. |
2482 | * |
 * Returns
 * - 0: a layoutreturn needs to be scheduled.
 * - -EBUSY: there are layout segments that are still in use.
 * - -ENOENT: there are no layout segments that need to be returned.
2487 | */ |
2488 | int |
2489 | pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, |
2490 | struct list_head *tmp_list, |
2491 | const struct pnfs_layout_range *return_range, |
2492 | u32 seq) |
2493 | { |
2494 | struct pnfs_layout_segment *lseg, *next; |
2495 | struct nfs_server *server = NFS_SERVER(inode: lo->plh_inode); |
2496 | int remaining = 0; |
2497 | |
2498 | dprintk("%s:Begin lo %p\n" , __func__, lo); |
2499 | |
2500 | assert_spin_locked(&lo->plh_inode->i_lock); |
2501 | |
2502 | if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) |
2503 | tmp_list = &lo->plh_return_segs; |
2504 | |
2505 | list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) |
2506 | if (pnfs_match_lseg_recall(lseg, recall_range: return_range, seq)) { |
2507 | dprintk("%s: marking lseg %p iomode %d " |
2508 | "offset %llu length %llu\n" , __func__, |
2509 | lseg, lseg->pls_range.iomode, |
2510 | lseg->pls_range.offset, |
2511 | lseg->pls_range.length); |
2512 | if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) |
2513 | tmp_list = &lo->plh_return_segs; |
2514 | if (mark_lseg_invalid(lseg, tmp_list)) |
2515 | continue; |
2516 | remaining++; |
2517 | set_bit(nr: NFS_LSEG_LAYOUTRETURN, addr: &lseg->pls_flags); |
2518 | pnfs_lseg_cancel_io(server, lseg); |
2519 | } |
2520 | |
2521 | if (remaining) { |
2522 | pnfs_set_plh_return_info(lo, iomode: return_range->iomode, seq); |
2523 | return -EBUSY; |
2524 | } |
2525 | |
2526 | if (!list_empty(head: &lo->plh_return_segs)) { |
2527 | pnfs_set_plh_return_info(lo, iomode: return_range->iomode, seq); |
2528 | return 0; |
2529 | } |
2530 | |
2531 | return -ENOENT; |
2532 | } |
2533 | |
2534 | static void |
2535 | pnfs_mark_layout_for_return(struct inode *inode, |
2536 | const struct pnfs_layout_range *range) |
2537 | { |
2538 | struct pnfs_layout_hdr *lo; |
2539 | bool return_now = false; |
2540 | |
2541 | spin_lock(lock: &inode->i_lock); |
2542 | lo = NFS_I(inode)->layout; |
2543 | if (!pnfs_layout_is_valid(lo)) { |
2544 | spin_unlock(lock: &inode->i_lock); |
2545 | return; |
2546 | } |
2547 | pnfs_set_plh_return_info(lo, iomode: range->iomode, seq: 0); |
2548 | /* |
2549 | * mark all matching lsegs so that we are sure to have no live |
2550 | * segments at hand when sending layoutreturn. See pnfs_put_lseg() |
2551 | * for how it works. |
2552 | */ |
2553 | if (pnfs_mark_matching_lsegs_return(lo, tmp_list: &lo->plh_return_segs, return_range: range, seq: 0) != -EBUSY) { |
2554 | const struct cred *cred; |
2555 | nfs4_stateid stateid; |
2556 | enum pnfs_iomode iomode; |
2557 | |
2558 | return_now = pnfs_prepare_layoutreturn(lo, stateid: &stateid, cred: &cred, iomode: &iomode); |
2559 | spin_unlock(lock: &inode->i_lock); |
2560 | if (return_now) |
2561 | pnfs_send_layoutreturn(lo, stateid: &stateid, pcred: &cred, iomode, sync: false); |
2562 | } else { |
2563 | spin_unlock(lock: &inode->i_lock); |
2564 | nfs_commit_inode(inode, 0); |
2565 | } |
2566 | } |
2567 | |
2568 | void pnfs_error_mark_layout_for_return(struct inode *inode, |
2569 | struct pnfs_layout_segment *lseg) |
2570 | { |
2571 | struct pnfs_layout_range range = { |
2572 | .iomode = lseg->pls_range.iomode, |
2573 | .offset = 0, |
2574 | .length = NFS4_MAX_UINT64, |
2575 | }; |
2576 | |
2577 | pnfs_mark_layout_for_return(inode, range: &range); |
2578 | } |
2579 | EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return); |
2580 | |
2581 | static bool |
2582 | pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo) |
2583 | { |
2584 | return pnfs_layout_is_valid(lo) && |
2585 | !test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) && |
2586 | !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); |
2587 | } |
2588 | |
2589 | static struct pnfs_layout_segment * |
2590 | pnfs_find_first_lseg(struct pnfs_layout_hdr *lo, |
2591 | const struct pnfs_layout_range *range, |
2592 | enum pnfs_iomode iomode) |
2593 | { |
2594 | struct pnfs_layout_segment *lseg; |
2595 | |
2596 | list_for_each_entry(lseg, &lo->plh_segs, pls_list) { |
2597 | if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) |
2598 | continue; |
2599 | if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) |
2600 | continue; |
2601 | if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY) |
2602 | continue; |
2603 | if (pnfs_lseg_range_intersecting(l1: &lseg->pls_range, l2: range)) |
2604 | return lseg; |
2605 | } |
2606 | return NULL; |
2607 | } |
2608 | |
2609 | /* Find open file states whose mode matches that of the range */ |
2610 | static bool |
2611 | pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo, |
2612 | const struct pnfs_layout_range *range) |
2613 | { |
2614 | struct list_head *head; |
2615 | struct nfs_open_context *ctx; |
2616 | fmode_t mode = 0; |
2617 | |
2618 | if (!pnfs_layout_can_be_returned(lo) || |
2619 | !pnfs_find_first_lseg(lo, range, iomode: range->iomode)) |
2620 | return false; |
2621 | |
2622 | head = &NFS_I(inode: lo->plh_inode)->open_files; |
2623 | list_for_each_entry_rcu(ctx, head, list) { |
2624 | if (ctx->state) |
2625 | mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE); |
2626 | } |
2627 | |
2628 | switch (range->iomode) { |
2629 | default: |
2630 | break; |
2631 | case IOMODE_READ: |
2632 | mode &= ~FMODE_WRITE; |
2633 | break; |
2634 | case IOMODE_RW: |
2635 | if (pnfs_find_first_lseg(lo, range, iomode: IOMODE_READ)) |
2636 | mode &= ~FMODE_READ; |
2637 | } |
2638 | return mode == 0; |
2639 | } |
2640 | |
2641 | static int pnfs_layout_return_unused_byserver(struct nfs_server *server, |
2642 | void *data) |
2643 | { |
2644 | const struct pnfs_layout_range *range = data; |
2645 | const struct cred *cred; |
2646 | struct pnfs_layout_hdr *lo; |
2647 | struct inode *inode; |
2648 | nfs4_stateid stateid; |
2649 | enum pnfs_iomode iomode; |
2650 | |
2651 | restart: |
2652 | rcu_read_lock(); |
2653 | list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) { |
2654 | inode = lo->plh_inode; |
2655 | if (!inode || !pnfs_layout_can_be_returned(lo) || |
2656 | test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) |
2657 | continue; |
2658 | spin_lock(lock: &inode->i_lock); |
2659 | if (!lo->plh_inode || |
2660 | !pnfs_should_return_unused_layout(lo, range)) { |
2661 | spin_unlock(lock: &inode->i_lock); |
2662 | continue; |
2663 | } |
2664 | pnfs_get_layout_hdr(lo); |
2665 | pnfs_set_plh_return_info(lo, iomode: range->iomode, seq: 0); |
2666 | if (pnfs_mark_matching_lsegs_return(lo, tmp_list: &lo->plh_return_segs, |
2667 | return_range: range, seq: 0) != 0 || |
2668 | !pnfs_prepare_layoutreturn(lo, stateid: &stateid, cred: &cred, iomode: &iomode)) { |
2669 | spin_unlock(lock: &inode->i_lock); |
2670 | rcu_read_unlock(); |
2671 | pnfs_put_layout_hdr(lo); |
2672 | cond_resched(); |
2673 | goto restart; |
2674 | } |
2675 | spin_unlock(lock: &inode->i_lock); |
2676 | rcu_read_unlock(); |
2677 | pnfs_send_layoutreturn(lo, stateid: &stateid, pcred: &cred, iomode, sync: false); |
2678 | pnfs_put_layout_hdr(lo); |
2679 | cond_resched(); |
2680 | goto restart; |
2681 | } |
2682 | rcu_read_unlock(); |
2683 | return 0; |
2684 | } |
2685 | |
2686 | void |
2687 | pnfs_layout_return_unused_byclid(struct nfs_client *clp, |
2688 | enum pnfs_iomode iomode) |
2689 | { |
2690 | struct pnfs_layout_range range = { |
2691 | .iomode = iomode, |
2692 | .offset = 0, |
2693 | .length = NFS4_MAX_UINT64, |
2694 | }; |
2695 | |
2696 | nfs_client_for_each_server(clp, fn: pnfs_layout_return_unused_byserver, |
2697 | data: &range); |
2698 | } |
2699 | |
2700 | void |
2701 | pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio) |
2702 | { |
2703 | if (pgio->pg_lseg == NULL || |
2704 | test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags)) |
2705 | return; |
2706 | pnfs_put_lseg(pgio->pg_lseg); |
2707 | pgio->pg_lseg = NULL; |
2708 | } |
2709 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout); |
2710 | |
2711 | /* |
2712 | * Check for any intersection between the request and the pgio->pg_lseg, |
2713 | * and if none, put this pgio->pg_lseg away. |
2714 | */ |
2715 | void |
2716 | pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) |
2717 | { |
2718 | if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(lseg: pgio->pg_lseg, req)) { |
2719 | pnfs_put_lseg(pgio->pg_lseg); |
2720 | pgio->pg_lseg = NULL; |
2721 | } |
2722 | } |
2723 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range); |
2724 | |
2725 | void |
2726 | pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) |
2727 | { |
2728 | u64 rd_size; |
2729 | |
2730 | pnfs_generic_pg_check_layout(pgio); |
2731 | pnfs_generic_pg_check_range(pgio, req); |
2732 | if (pgio->pg_lseg == NULL) { |
2733 | if (pgio->pg_dreq == NULL) |
2734 | rd_size = i_size_read(inode: pgio->pg_inode) - req_offset(req); |
2735 | else |
2736 | rd_size = nfs_dreq_bytes_left(dreq: pgio->pg_dreq); |
2737 | |
2738 | pgio->pg_lseg = |
2739 | pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), |
2740 | req_offset(req), rd_size, |
2741 | IOMODE_READ, false, |
2742 | nfs_io_gfp_mask()); |
2743 | if (IS_ERR(ptr: pgio->pg_lseg)) { |
2744 | pgio->pg_error = PTR_ERR(ptr: pgio->pg_lseg); |
2745 | pgio->pg_lseg = NULL; |
2746 | return; |
2747 | } |
2748 | } |
2749 | /* If no lseg, fall back to read through mds */ |
2750 | if (pgio->pg_lseg == NULL) |
2751 | nfs_pageio_reset_read_mds(pgio); |
2752 | |
2753 | } |
2754 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read); |
2755 | |
2756 | void |
2757 | pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, |
2758 | struct nfs_page *req, u64 wb_size) |
2759 | { |
2760 | pnfs_generic_pg_check_layout(pgio); |
2761 | pnfs_generic_pg_check_range(pgio, req); |
2762 | if (pgio->pg_lseg == NULL) { |
2763 | pgio->pg_lseg = |
2764 | pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), |
2765 | req_offset(req), wb_size, IOMODE_RW, |
2766 | false, nfs_io_gfp_mask()); |
2767 | if (IS_ERR(ptr: pgio->pg_lseg)) { |
2768 | pgio->pg_error = PTR_ERR(ptr: pgio->pg_lseg); |
2769 | pgio->pg_lseg = NULL; |
2770 | return; |
2771 | } |
2772 | } |
2773 | /* If no lseg, fall back to write through mds */ |
2774 | if (pgio->pg_lseg == NULL) |
2775 | nfs_pageio_reset_write_mds(pgio); |
2776 | } |
2777 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write); |
2778 | |
2779 | void |
2780 | pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc) |
2781 | { |
2782 | if (desc->pg_lseg) { |
2783 | pnfs_put_lseg(desc->pg_lseg); |
2784 | desc->pg_lseg = NULL; |
2785 | } |
2786 | } |
2787 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup); |
2788 | |
2789 | /* |
2790 | * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number |
2791 | * of bytes (maximum @req->wb_bytes) that can be coalesced. |
2792 | */ |
2793 | size_t |
2794 | pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, |
2795 | struct nfs_page *prev, struct nfs_page *req) |
2796 | { |
2797 | unsigned int size; |
2798 | u64 seg_end, req_start, seg_left; |
2799 | |
2800 | size = nfs_generic_pg_test(desc: pgio, prev, req); |
2801 | if (!size) |
2802 | return 0; |
2803 | |
2804 | /* |
2805 | * 'size' contains the number of bytes left in the current page (up |
2806 | * to the original size asked for in @req->wb_bytes). |
2807 | * |
 * Calculate how many bytes are left in the layout segment
 * and if there are fewer bytes than 'size', return that instead.
 *
 * Please also note that 'seg_end' below is actually the offset of the
 * first byte that lies outside the pnfs_layout_range. FIXME?
2813 | * |
2814 | */ |
2815 | if (pgio->pg_lseg) { |
2816 | seg_end = pnfs_end_offset(start: pgio->pg_lseg->pls_range.offset, |
2817 | len: pgio->pg_lseg->pls_range.length); |
2818 | req_start = req_offset(req); |
2819 | |
2820 | /* start of request is past the last byte of this segment */ |
2821 | if (req_start >= seg_end) |
2822 | return 0; |
2823 | |
2824 | /* adjust 'size' iff there are fewer bytes left in the |
2825 | * segment than what nfs_generic_pg_test returned */ |
2826 | seg_left = seg_end - req_start; |
2827 | if (seg_left < size) |
2828 | size = (unsigned int)seg_left; |
2829 | } |
2830 | |
2831 | return size; |
2832 | } |
2833 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); |
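/*
 * Worked example of the clamping above: with a segment covering
 * [0, 8192) and a request starting at offset 4096, seg_end is 8192 and
 * seg_left is 4096; if nfs_generic_pg_test() allowed 8192 bytes, the
 * result is clamped to 4096 so coalescing stops at the segment
 * boundary.
 */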
2834 | |
2835 | int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr) |
2836 | { |
2837 | struct nfs_pageio_descriptor pgio; |
2838 | |
2839 | /* Resend all requests through the MDS */ |
2840 | nfs_pageio_init_write(pgio: &pgio, inode: hdr->inode, FLUSH_STABLE, force_mds: true, |
2841 | compl_ops: hdr->completion_ops); |
2842 | return nfs_pageio_resend(&pgio, hdr); |
2843 | } |
2844 | EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds); |
2845 | |
2846 | static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr) |
2847 | { |
2848 | |
2849 | dprintk("pnfs write error = %d\n" , hdr->pnfs_error); |
2850 | if (NFS_SERVER(inode: hdr->inode)->pnfs_curr_ld->flags & |
2851 | PNFS_LAYOUTRET_ON_ERROR) { |
2852 | pnfs_return_layout(ino: hdr->inode); |
2853 | } |
2854 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) |
2855 | hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr); |
2856 | } |
2857 | |
2858 | /* |
2859 | * Called by non rpc-based layout drivers |
2860 | */ |
2861 | void pnfs_ld_write_done(struct nfs_pgio_header *hdr) |
2862 | { |
2863 | if (likely(!hdr->pnfs_error)) { |
2864 | pnfs_set_layoutcommit(hdr->inode, hdr->lseg, |
2865 | hdr->mds_offset + hdr->res.count); |
2866 | hdr->mds_ops->rpc_call_done(&hdr->task, hdr); |
2867 | } |
2868 | trace_nfs4_pnfs_write(hdr, error: hdr->pnfs_error); |
2869 | if (unlikely(hdr->pnfs_error)) |
2870 | pnfs_ld_handle_write_error(hdr); |
2871 | hdr->mds_ops->rpc_release(hdr); |
2872 | } |
2873 | EXPORT_SYMBOL_GPL(pnfs_ld_write_done); |
2874 | |
2875 | static void |
2876 | pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, |
2877 | struct nfs_pgio_header *hdr) |
2878 | { |
2879 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
2880 | |
2881 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) { |
2882 | list_splice_tail_init(list: &hdr->pages, head: &mirror->pg_list); |
2883 | nfs_pageio_reset_write_mds(pgio: desc); |
2884 | mirror->pg_recoalesce = 1; |
2885 | } |
2886 | hdr->completion_ops->completion(hdr); |
2887 | } |
2888 | |
2889 | static enum pnfs_try_status |
2890 | pnfs_try_to_write_data(struct nfs_pgio_header *hdr, |
2891 | const struct rpc_call_ops *call_ops, |
2892 | struct pnfs_layout_segment *lseg, |
2893 | int how) |
2894 | { |
2895 | struct inode *inode = hdr->inode; |
2896 | enum pnfs_try_status trypnfs; |
2897 | struct nfs_server *nfss = NFS_SERVER(inode); |
2898 | |
2899 | hdr->mds_ops = call_ops; |
2900 | |
2901 | dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n" , __func__, |
2902 | inode->i_ino, hdr->args.count, hdr->args.offset, how); |
2903 | trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how); |
2904 | if (trypnfs != PNFS_NOT_ATTEMPTED) |
2905 | nfs_inc_stats(inode, stat: NFSIOS_PNFS_WRITE); |
2906 | dprintk("%s End (trypnfs:%d)\n" , __func__, trypnfs); |
2907 | return trypnfs; |
2908 | } |
2909 | |
2910 | static void |
2911 | pnfs_do_write(struct nfs_pageio_descriptor *desc, |
2912 | struct nfs_pgio_header *hdr, int how) |
2913 | { |
2914 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; |
2915 | struct pnfs_layout_segment *lseg = desc->pg_lseg; |
2916 | enum pnfs_try_status trypnfs; |
2917 | |
2918 | trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how); |
2919 | switch (trypnfs) { |
2920 | case PNFS_NOT_ATTEMPTED: |
2921 | pnfs_write_through_mds(desc, hdr); |
2922 | break; |
2923 | case PNFS_ATTEMPTED: |
2924 | break; |
2925 | case PNFS_TRY_AGAIN: |
2926 | /* cleanup hdr and prepare to redo pnfs */ |
2927 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) { |
2928 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
2929 | list_splice_init(list: &hdr->pages, head: &mirror->pg_list); |
2930 | mirror->pg_recoalesce = 1; |
2931 | } |
2932 | hdr->mds_ops->rpc_release(hdr); |
2933 | } |
2934 | } |
2935 | |
2936 | static void pnfs_writehdr_free(struct nfs_pgio_header *hdr) |
2937 | { |
2938 | pnfs_put_lseg(hdr->lseg); |
2939 | nfs_pgio_header_free(hdr); |
2940 | } |
2941 | |
2942 | int |
2943 | pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) |
2944 | { |
2945 | struct nfs_pgio_header *hdr; |
2946 | int ret; |
2947 | |
2948 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
2949 | if (!hdr) { |
2950 | desc->pg_error = -ENOMEM; |
2951 | return desc->pg_error; |
2952 | } |
2953 | nfs_pgheader_init(desc, hdr, release: pnfs_writehdr_free); |
2954 | |
2955 | hdr->lseg = pnfs_get_lseg(lseg: desc->pg_lseg); |
2956 | ret = nfs_generic_pgio(desc, hdr); |
2957 | if (!ret) |
2958 | pnfs_do_write(desc, hdr, how: desc->pg_ioflags); |
2959 | |
2960 | return ret; |
2961 | } |
2962 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); |
2963 | |
2964 | int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr) |
2965 | { |
2966 | struct nfs_pageio_descriptor pgio; |
2967 | |
2968 | /* Resend all requests through the MDS */ |
2969 | nfs_pageio_init_read(pgio: &pgio, inode: hdr->inode, force_mds: true, compl_ops: hdr->completion_ops); |
2970 | return nfs_pageio_resend(&pgio, hdr); |
2971 | } |
2972 | EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds); |
2973 | |
2974 | static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr) |
2975 | { |
2976 | dprintk("pnfs read error = %d\n" , hdr->pnfs_error); |
2977 | if (NFS_SERVER(inode: hdr->inode)->pnfs_curr_ld->flags & |
2978 | PNFS_LAYOUTRET_ON_ERROR) { |
2979 | pnfs_return_layout(ino: hdr->inode); |
2980 | } |
2981 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) |
2982 | hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr); |
2983 | } |
2984 | |
2985 | /* |
2986 | * Called by non rpc-based layout drivers |
2987 | */ |
2988 | void pnfs_ld_read_done(struct nfs_pgio_header *hdr) |
2989 | { |
2990 | if (likely(!hdr->pnfs_error)) |
2991 | hdr->mds_ops->rpc_call_done(&hdr->task, hdr); |
2992 | trace_nfs4_pnfs_read(hdr, error: hdr->pnfs_error); |
2993 | if (unlikely(hdr->pnfs_error)) |
2994 | pnfs_ld_handle_read_error(hdr); |
2995 | hdr->mds_ops->rpc_release(hdr); |
2996 | } |
2997 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); |
2998 | |
2999 | static void |
3000 | pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, |
3001 | struct nfs_pgio_header *hdr) |
3002 | { |
3003 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
3004 | |
3005 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) { |
3006 | list_splice_tail_init(list: &hdr->pages, head: &mirror->pg_list); |
3007 | nfs_pageio_reset_read_mds(pgio: desc); |
3008 | mirror->pg_recoalesce = 1; |
3009 | } |
3010 | hdr->completion_ops->completion(hdr); |
3011 | } |
3012 | |
3013 | /* |
3014 | * Call the appropriate parallel I/O subsystem read function. |
3015 | */ |
3016 | static enum pnfs_try_status |
3017 | pnfs_try_to_read_data(struct nfs_pgio_header *hdr, |
3018 | const struct rpc_call_ops *call_ops, |
3019 | struct pnfs_layout_segment *lseg) |
3020 | { |
3021 | struct inode *inode = hdr->inode; |
3022 | struct nfs_server *nfss = NFS_SERVER(inode); |
3023 | enum pnfs_try_status trypnfs; |
3024 | |
3025 | hdr->mds_ops = call_ops; |
3026 | |
3027 | dprintk("%s: Reading ino:%lu %u@%llu\n" , |
3028 | __func__, inode->i_ino, hdr->args.count, hdr->args.offset); |
3029 | |
3030 | trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr); |
3031 | if (trypnfs != PNFS_NOT_ATTEMPTED) |
3032 | nfs_inc_stats(inode, stat: NFSIOS_PNFS_READ); |
3033 | dprintk("%s End (trypnfs:%d)\n" , __func__, trypnfs); |
3034 | return trypnfs; |
3035 | } |
3036 | |
3037 | /* Resend all requests through pnfs. */ |
3038 | void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr, |
3039 | unsigned int mirror_idx) |
3040 | { |
3041 | struct nfs_pageio_descriptor pgio; |
3042 | |
3043 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) { |
3044 | /* Prevent deadlocks with layoutreturn! */ |
3045 | pnfs_put_lseg(hdr->lseg); |
3046 | hdr->lseg = NULL; |
3047 | |
3048 | nfs_pageio_init_read(pgio: &pgio, inode: hdr->inode, force_mds: false, |
3049 | compl_ops: hdr->completion_ops); |
3050 | pgio.pg_mirror_idx = mirror_idx; |
3051 | hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr); |
3052 | } |
3053 | } |
3054 | EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs); |
3055 | |
3056 | static void |
3057 | pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) |
3058 | { |
3059 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; |
3060 | struct pnfs_layout_segment *lseg = desc->pg_lseg; |
3061 | enum pnfs_try_status trypnfs; |
3062 | |
3063 | trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg); |
3064 | switch (trypnfs) { |
3065 | case PNFS_NOT_ATTEMPTED: |
3066 | pnfs_read_through_mds(desc, hdr); |
3067 | break; |
3068 | case PNFS_ATTEMPTED: |
3069 | break; |
3070 | case PNFS_TRY_AGAIN: |
3071 | /* cleanup hdr and prepare to redo pnfs */ |
3072 | if (!test_and_set_bit(nr: NFS_IOHDR_REDO, addr: &hdr->flags)) { |
3073 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
3074 | list_splice_init(list: &hdr->pages, head: &mirror->pg_list); |
3075 | mirror->pg_recoalesce = 1; |
3076 | } |
3077 | hdr->mds_ops->rpc_release(hdr); |
3078 | } |
3079 | } |
3080 | |
3081 | static void pnfs_readhdr_free(struct nfs_pgio_header *hdr) |
3082 | { |
3083 | pnfs_put_lseg(hdr->lseg); |
3084 | nfs_pgio_header_free(hdr); |
3085 | } |
3086 | |
3087 | int |
3088 | pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) |
3089 | { |
3090 | struct nfs_pgio_header *hdr; |
3091 | int ret; |
3092 | |
3093 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
3094 | if (!hdr) { |
3095 | desc->pg_error = -ENOMEM; |
3096 | return desc->pg_error; |
3097 | } |
3098 | nfs_pgheader_init(desc, hdr, release: pnfs_readhdr_free); |
3099 | hdr->lseg = pnfs_get_lseg(lseg: desc->pg_lseg); |
3100 | ret = nfs_generic_pgio(desc, hdr); |
3101 | if (!ret) |
3102 | pnfs_do_read(desc, hdr); |
3103 | return ret; |
3104 | } |
3105 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); |
3106 | |
3107 | static void pnfs_clear_layoutcommitting(struct inode *inode) |
3108 | { |
3109 | unsigned long *bitlock = &NFS_I(inode)->flags; |
3110 | |
3111 | clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, addr: bitlock); |
3112 | smp_mb__after_atomic(); |
3113 | wake_up_bit(word: bitlock, NFS_INO_LAYOUTCOMMITTING); |
3114 | } |
3115 | |
3116 | /* |
3117 | * There can be multiple RW segments. |
3118 | */ |
3119 | static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp) |
3120 | { |
3121 | struct pnfs_layout_segment *lseg; |
3122 | |
3123 | list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { |
3124 | if (lseg->pls_range.iomode == IOMODE_RW && |
3125 | test_and_clear_bit(nr: NFS_LSEG_LAYOUTCOMMIT, addr: &lseg->pls_flags)) |
3126 | list_add(new: &lseg->pls_lc_list, head: listp); |
3127 | } |
3128 | } |
3129 | |
3130 | static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) |
3131 | { |
3132 | struct pnfs_layout_segment *lseg, *tmp; |
3133 | |
3134 | /* Matched by references in pnfs_set_layoutcommit */ |
3135 | list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { |
3136 | list_del_init(entry: &lseg->pls_lc_list); |
3137 | pnfs_put_lseg(lseg); |
3138 | } |
3139 | |
3140 | pnfs_clear_layoutcommitting(inode); |
3141 | } |
3142 | |
3143 | void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) |
3144 | { |
3145 | pnfs_layout_io_set_failed(lo: lseg->pls_layout, iomode: lseg->pls_range.iomode); |
3146 | } |
3147 | EXPORT_SYMBOL_GPL(pnfs_set_lo_fail); |
3148 | |
3149 | void |
3150 | pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg, |
3151 | loff_t end_pos) |
3152 | { |
3153 | struct nfs_inode *nfsi = NFS_I(inode); |
3154 | bool mark_as_dirty = false; |
3155 | |
3156 | spin_lock(lock: &inode->i_lock); |
3157 | if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, addr: &nfsi->flags)) { |
3158 | nfsi->layout->plh_lwb = end_pos; |
3159 | mark_as_dirty = true; |
3160 | dprintk("%s: Set layoutcommit for inode %lu " , |
3161 | __func__, inode->i_ino); |
3162 | } else if (end_pos > nfsi->layout->plh_lwb) |
3163 | nfsi->layout->plh_lwb = end_pos; |
3164 | if (!test_and_set_bit(nr: NFS_LSEG_LAYOUTCOMMIT, addr: &lseg->pls_flags)) { |
3165 | /* references matched in nfs4_layoutcommit_release */ |
3166 | pnfs_get_lseg(lseg); |
3167 | } |
3168 | spin_unlock(lock: &inode->i_lock); |
3169 | dprintk("%s: lseg %p end_pos %llu\n" , |
3170 | __func__, lseg, nfsi->layout->plh_lwb); |
3171 | |
/* If pnfs_layoutcommit_inode() runs between the i_lock section above
 * and this point, the next layoutcommit will be a noop because
 * NFS_INO_LAYOUTCOMMIT will not be set */
3174 | if (mark_as_dirty) |
3175 | mark_inode_dirty_sync(inode); |
3176 | } |
3177 | EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); |
3178 | |
3179 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) |
3180 | { |
3181 | struct nfs_server *nfss = NFS_SERVER(inode: data->args.inode); |
3182 | |
3183 | if (nfss->pnfs_curr_ld->cleanup_layoutcommit) |
3184 | nfss->pnfs_curr_ld->cleanup_layoutcommit(data); |
3185 | pnfs_list_write_lseg_done(inode: data->args.inode, listp: &data->lseg_list); |
3186 | } |
3187 | |
3188 | /* |
3189 | * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and |
3190 | * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough |
3191 | * data to disk to allow the server to recover the data if it crashes. |
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off and either a COMMIT is sent to a data server or WRITEs to a
 * data server return NFS_DATA_SYNC.
3195 | */ |
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

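	/*
	 * Only one layoutcommit may be in flight per inode: an async
	 * caller backs off with -EAGAIN (the inode is re-marked dirty
	 * below, so the commit is retried later), while a sync caller
	 * waits for the NFS_INO_LAYOUTCOMMITTING bit instead.
	 */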
	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), nfs_io_gfp_mask());
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	data->cred = get_cred(nfsi->layout->plh_lc_cred);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
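	/*
	 * plh_lwb is one byte past the last byte written, so the value
	 * put on the wire is end_pos - 1; an end_pos of 0 (nothing
	 * recorded) is encoded as U64_MAX.
	 */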
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_cred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

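/*
 * Generic sync helper for layout drivers: @datasync is ignored, since a
 * synchronous LAYOUTCOMMIT is needed in either case.
 */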
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

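/*
 * Allocate a zeroed nfs4_threshold, used to hold the server's
 * mdsthreshold hints when deciding between MDS and pNFS I/O.
 */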
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), nfs_io_gfp_mask());
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
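/*
 * Send a LAYOUTSTATS report for @inode if the server and layout driver
 * support it. NFS_INO_LAYOUTSTATS serializes reports: it is cleared
 * here on failure, or again once the report completes.
 */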
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

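/*
 * Interval between periodic LAYOUTSTATS reports, in seconds; 0 lets the
 * layout driver fall back to its default reporting interval.
 */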
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);
