// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>
#include <linux/mnt_idmapping.h>

#include "super.h"
#include "mds_client.h"
#include "crypto.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
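
/*
 * Cap on the reconnect message payload: stay one page below INT_MAX
 * so the accumulated pagelist length cannot overflow an int.
 */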

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage. Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid. If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	struct ceph_mds_session *session;
	int nr_caps, nr_realms;
	struct ceph_pagelist *pagelist;
	unsigned msg_version;
	bool allow_multi;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

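/*
 * Most reply sub-structures come in two encodings: a legacy bare
 * struct, and a versioned envelope (used whenever features == (u64)-1):
 * a version byte (struct_v), a compatibility byte (struct_compat) and
 * a 32-bit payload length (struct_len). The parse_reply_info_*
 * helpers below decode that header, clamp the end pointer to the
 * payload, and silently skip any trailing fields they don't
 * understand.
 */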
static int parse_reply_info_quota(void **p, void *end,
				  struct ceph_mds_reply_info_in *info)
{
	u8 struct_v, struct_compat;
	u32 struct_len;

	ceph_decode_8_safe(p, end, struct_v, bad);
	ceph_decode_8_safe(p, end, struct_compat, bad);
	/* struct_v is expected to be >= 1. we only
	 * understand encoding with struct_compat == 1. */
	if (!struct_v || struct_compat != 1)
		goto bad;
	ceph_decode_32_safe(p, end, struct_len, bad);
	ceph_decode_need(p, end, struct_len, bad);
	end = *p + struct_len;
	ceph_decode_64_safe(p, end, info->max_bytes, bad);
	ceph_decode_64_safe(p, end, info->max_files, bad);
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = 0;
	u8 struct_v = 0;

	if (features == (u64)-1) {
		u32 struct_len;
		u8 struct_compat;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_copy_safe(p, end, &info->dir_layout,
			      sizeof(info->dir_layout), bad);
	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features == (u64)-1) {
		/* inline data */
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
		/* quota */
		err = parse_reply_info_quota(p, end, info);
		if (err < 0)
			goto out_bad;
		/* pool namespace */
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}

		/* btime */
		ceph_decode_need(p, end, sizeof(info->btime), bad);
		ceph_decode_copy(p, &info->btime, sizeof(info->btime));

		/* change attribute */
		ceph_decode_64_safe(p, end, info->change_attr, bad);

		/* dir pin */
		if (struct_v >= 2) {
			ceph_decode_32_safe(p, end, info->dir_pin, bad);
		} else {
			info->dir_pin = -ENODATA;
		}

		/* snapshot birth time, remains zero for v<=2 */
		if (struct_v >= 3) {
			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
			ceph_decode_copy(p, &info->snap_btime,
					 sizeof(info->snap_btime));
		} else {
			memset(&info->snap_btime, 0, sizeof(info->snap_btime));
		}

		/* snapshot count, remains zero for v<=3 */
		if (struct_v >= 4) {
			ceph_decode_64_safe(p, end, info->rsnaps, bad);
		} else {
			info->rsnaps = 0;
		}

		if (struct_v >= 5) {
			u32 alen;

			ceph_decode_32_safe(p, end, alen, bad);

			while (alen--) {
				u32 len;

				/* key */
				ceph_decode_32_safe(p, end, len, bad);
				ceph_decode_skip_n(p, end, len, bad);
				/* value */
				ceph_decode_32_safe(p, end, len, bad);
				ceph_decode_skip_n(p, end, len, bad);
			}
		}

		/* fscrypt flag -- ignore */
		if (struct_v >= 6)
			ceph_decode_skip_8(p, end, bad);

		info->fscrypt_auth = NULL;
		info->fscrypt_auth_len = 0;
		info->fscrypt_file = NULL;
		info->fscrypt_file_len = 0;
		if (struct_v >= 7) {
			ceph_decode_32_safe(p, end, info->fscrypt_auth_len, bad);
			if (info->fscrypt_auth_len) {
				info->fscrypt_auth = kmalloc(info->fscrypt_auth_len,
							     GFP_KERNEL);
				if (!info->fscrypt_auth)
					return -ENOMEM;
				ceph_decode_copy_safe(p, end, info->fscrypt_auth,
						      info->fscrypt_auth_len, bad);
			}
			ceph_decode_32_safe(p, end, info->fscrypt_file_len, bad);
			if (info->fscrypt_file_len) {
				info->fscrypt_file = kmalloc(info->fscrypt_file_len,
							     GFP_KERNEL);
				if (!info->fscrypt_file)
					return -ENOMEM;
				ceph_decode_copy_safe(p, end, info->fscrypt_file,
						      info->fscrypt_file_len, bad);
			}
		}
		*p = end;
	} else {
		/* legacy (unversioned) struct */
		if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
			ceph_decode_64_safe(p, end, info->inline_version, bad);
			ceph_decode_32_safe(p, end, info->inline_len, bad);
			ceph_decode_need(p, end, info->inline_len, bad);
			info->inline_data = *p;
			*p += info->inline_len;
		} else
			info->inline_version = CEPH_INLINE_NONE;

		if (features & CEPH_FEATURE_MDS_QUOTA) {
			err = parse_reply_info_quota(p, end, info);
			if (err < 0)
				goto out_bad;
		} else {
			info->max_bytes = 0;
			info->max_files = 0;
		}

		info->pool_ns_len = 0;
		info->pool_ns_data = NULL;
		if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
			if (info->pool_ns_len > 0) {
				ceph_decode_need(p, end, info->pool_ns_len, bad);
				info->pool_ns_data = *p;
				*p += info->pool_ns_len;
			}
		}

		if (features & CEPH_FEATURE_FS_BTIME) {
			ceph_decode_need(p, end, sizeof(info->btime), bad);
			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
			ceph_decode_64_safe(p, end, info->change_attr, bad);
		}

		info->dir_pin = -ENODATA;
		/* info->snap_btime and info->rsnaps remain zero */
	}
	return 0;
bad:
	err = -EIO;
out_bad:
	return err;
}

static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
	*dirfrag = *p;
	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
	if (unlikely(*p > end))
		goto bad;
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

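/*
 * A dentry lease is a bare struct ceph_mds_reply_lease in the legacy
 * encoding; the versioned encoding (struct_v >= 2) may append a
 * length-prefixed "alternate name" blob, which carries the ciphertext
 * of an encrypted dentry name.
 */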
static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features, u32 *altname_len, u8 **altname)
{
	u8 struct_v;
	u32 struct_len;
	void *lend;

	if (features == (u64)-1) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);

		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;

		ceph_decode_32_safe(p, end, struct_len, bad);
	} else {
		struct_len = sizeof(**lease);
		*altname_len = 0;
		*altname = NULL;
	}

	lend = *p + struct_len;
	ceph_decode_need(p, end, struct_len, bad);
	*lease = *p;
	*p += sizeof(**lease);

	if (features == (u64)-1) {
		if (struct_v >= 2) {
			ceph_decode_32_safe(p, end, *altname_len, bad);
			ceph_decode_need(p, end, *altname_len, bad);
			*altname = *p;
			*p += *altname_len;
		} else {
			*altname = NULL;
			*altname_len = 0;
		}
	}
	*p = lend;
	return 0;
bad:
	return -EIO;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
		if (err < 0)
			goto out_bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;

		err = parse_reply_info_lease(p, end, &info->dlease, features,
					     &info->altname_len, &info->altname);
		if (err < 0)
			goto out_bad;
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				    struct ceph_mds_request *req,
				    u64 features)
{
	struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
	struct ceph_client *cl = req->r_mdsc->fsc->client;
	u32 num, i = 0;
	int err;

	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
	if (err < 0)
		goto out_bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err_client(cl, "dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct inode *inode = d_inode(req->r_dentry);
		struct ceph_inode_info *ci = ceph_inode(inode);
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		struct fscrypt_str tname = FSTR_INIT(NULL, 0);
		struct fscrypt_str oname = FSTR_INIT(NULL, 0);
		struct ceph_fname fname;
		u32 altname_len, _name_len;
		u8 *altname, *_name;

		/* dentry */
		ceph_decode_32_safe(p, end, _name_len, bad);
		ceph_decode_need(p, end, _name_len, bad);
		_name = *p;
		*p += _name_len;
		doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name);

		if (info->hash_order)
			rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						      _name, _name_len);

		/* dentry lease */
		err = parse_reply_info_lease(p, end, &rde->lease, features,
					     &altname_len, &altname);
		if (err)
			goto out_bad;

		/*
		 * Try to decrypt the dentry names and update them
		 * in the ceph_mds_reply_dir_entry struct.
		 */
		fname.dir = inode;
		fname.name = _name;
		fname.name_len = _name_len;
		fname.ctext = altname;
		fname.ctext_len = altname_len;
		/*
		 * _name_len may be larger than altname_len, e.g. when
		 * the human-readable name length is in the range
		 * (CEPH_NOHASH_NAME_MAX, CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE);
		 * the copy in ceph_fname_to_usr would then corrupt the
		 * data if there is no encryption key.
		 *
		 * Just set the no_copy flag; if there is no encryption
		 * key, oname.name will always be assigned to _name.
		 */
		fname.no_copy = true;
		if (altname_len == 0) {
			/*
			 * Set tname to _name, and this will be used
			 * to do the base64_decode in-place. It's
			 * safe because the decoded string is always
			 * shorter, at most 3/4 of the original
			 * string's length.
			 */
			tname.name = _name;

			/*
			 * Set oname to _name too, and this will be
			 * used to do the decryption in-place.
			 */
			oname.name = _name;
			oname.len = _name_len;
		} else {
			/*
			 * This will do the decryption only in-place
			 * from the altname ciphertext directly.
			 */
			oname.name = altname;
			oname.len = altname_len;
		}
		rde->is_nokey = false;
		err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
		if (err) {
			pr_err_client(cl, "unable to decode %.*s, got %d\n",
				      _name_len, _name, err);
			goto out_bad;
		}
		rde->name = oname.name;
		rde->name_len = oname.len;

		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	/* Skip over any unrecognized fields */
	*p = end;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err_client(cl, "problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}


#if BITS_PER_LONG == 64

#define DELEGATED_INO_AVAILABLE xa_mk_value(1)
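
/*
 * Inode numbers delegated to us by the MDS for async creates are
 * tracked in the session's s_delegated_inos xarray.
 * DELEGATED_INO_AVAILABLE is the sentinel value for an ino that has
 * been delegated but not yet handed out (see ceph_get_deleg_ino()).
 */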

static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	struct ceph_client *cl = s->s_mdsc->fsc->client;
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	doutc(cl, "got %u sets of delegated inodes\n", sets);
	while (sets--) {
		u64 start, len;

		ceph_decode_64_safe(p, end, start, bad);
		ceph_decode_64_safe(p, end, len, bad);

		/* Don't accept a delegation of system inodes */
		if (start < CEPH_INO_SYSTEM_BASE) {
			pr_warn_ratelimited_client(cl,
				"ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
				start, len);
			continue;
		}
		while (len--) {
			int err = xa_insert(&s->s_delegated_inos, start++,
					    DELEGATED_INO_AVAILABLE,
					    GFP_KERNEL);
			if (!err) {
				doutc(cl, "added delegated inode 0x%llx\n", start - 1);
			} else if (err == -EBUSY) {
				pr_warn_client(cl,
					"MDS delegated inode 0x%llx more than once.\n",
					start - 1);
			} else {
				return err;
			}
		}
	}
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	unsigned long ino;
	void *val;

	xa_for_each(&s->s_delegated_inos, ino, val) {
		val = xa_erase(&s->s_delegated_inos, ino);
		if (val == DELEGATED_INO_AVAILABLE)
			return ino;
	}
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
			 GFP_KERNEL);
}
#else /* BITS_PER_LONG == 64 */
/*
 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
 * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
 * and bottom words?
 */
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	if (sets)
		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return 0;
}
#endif /* BITS_PER_LONG == 64 */

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features, struct ceph_mds_session *s)
{
	int ret;

	if (features == (u64)-1 ||
	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
		if (*p == end) {
			/* Malformed reply? */
			info->has_create_ino = false;
		} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
			info->has_create_ino = true;
			/* struct_v, struct_compat, and len */
			ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
			ceph_decode_64_safe(p, end, info->ino, bad);
			ret = ceph_parse_deleg_inos(p, end, s);
			if (ret)
				return ret;
		} else {
			/* legacy */
			ceph_decode_64_safe(p, end, info->ino, bad);
			info->has_create_ino = true;
		}
	} else {
		if (*p != end)
			goto bad;
	}

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

static int parse_reply_info_getvxattr(void **p, void *end,
				      struct ceph_mds_reply_info_parsed *info,
				      u64 features)
{
	u32 value_len;

	ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */
	ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */
	ceph_decode_skip_32(p, end, bad); /* skip payload length */

	ceph_decode_32_safe(p, end, value_len, bad);

	if (value_len == end - *p) {
		info->xattr_info.xattr_value = *p;
		info->xattr_info.xattr_value_len = value_len;
		*p = end;
		return value_len;
	}
bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_request *req,
				  u64 features, struct ceph_mds_session *s)
{
	struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_readdir(p, end, req, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features, s);
	else if (op == CEPH_MDS_OP_GETVXATTR)
		return parse_reply_info_getvxattr(p, end, info, features);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
			    struct ceph_mds_request *req, u64 features)
{
	struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
	struct ceph_client *cl = s->s_mdsc->fsc->client;
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, req, features, s);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err_client(cl, "mds parse_reply err %d\n", err);
	ceph_msg_dump(msg);
	return err;
}

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	int i;

	kfree(info->diri.fscrypt_auth);
	kfree(info->diri.fscrypt_file);
	kfree(info->targeti.fscrypt_auth);
	kfree(info->targeti.fscrypt_file);
	if (!info->dir_entries)
		return;

	for (i = 0; i < info->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;

		kfree(rde->inode.fscrypt_auth);
		kfree(rde->inode.fscrypt_file);
	}
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}

/*
 * In the async unlink case the kclient won't wait for the first reply
 * from the MDS; it just drops all the links, unhashes the dentry and
 * then succeeds immediately.
 *
 * For any new create/link/rename etc. requests that follow using the
 * same file names, we must wait for the first reply of the inflight
 * unlink request, or the MDS may fail these subsequent requests with
 * -EEXIST if the inflight async unlink request was delayed for some
 * reason.
 *
 * The worst case is that a non-async openc request will successfully
 * open the file if the CDentry hasn't been unlinked yet, but the
 * previously delayed async unlink request will later remove the
 * CDentry. That means the just-created file may be accidentally
 * deleted later.
 *
 * We need to wait for the inflight async unlink requests to finish
 * when creating new files/directories using the same file names.
 */
int ceph_wait_on_conflict_unlink(struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
	struct ceph_client *cl = fsc->client;
	struct dentry *pdentry = dentry->d_parent;
	struct dentry *udentry, *found = NULL;
	struct ceph_dentry_info *di;
	struct qstr dname;
	u32 hash = dentry->d_name.hash;
	int err;

	dname.name = dentry->d_name.name;
	dname.len = dentry->d_name.len;

	rcu_read_lock();
	hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
				   hnode, hash) {
		udentry = di->dentry;

		spin_lock(&udentry->d_lock);
		if (udentry->d_name.hash != hash)
			goto next;
		if (unlikely(udentry->d_parent != pdentry))
			goto next;
		if (!hash_hashed(&di->hnode))
			goto next;

		if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
			pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n",
				       dentry, dentry);

		if (!d_same_name(udentry, pdentry, &dname))
			goto next;

		found = dget_dlock(udentry);
		spin_unlock(&udentry->d_lock);
		break;
next:
		spin_unlock(&udentry->d_lock);
	}
	rcu_read_unlock();

	if (likely(!found))
		return 0;

	doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry,
	      found, found);

	err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
			  TASK_KILLABLE);
	dput(found);
	return err;
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_CLOSED: return "closed";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref))
		return s;
	return NULL;
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	if (IS_ERR_OR_NULL(s))
		return;

	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		WARN_ON(mutex_is_locked(&s->s_mutex));
		xa_destroy(&s->s_delegated_inos);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	return ceph_get_mds_session(mdsc->sessions[mds]);
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_mds_session *s;

	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
		return ERR_PTR(-EIO);

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		doutc(cl, "realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	doutc(cl, "mds%d\n", mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	atomic_set(&s->s_cap_gen, 1);
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	INIT_LIST_HEAD(&s->s_caps);
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	xa_init(&s->s_delegated_inos);
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_dirty);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
				void (*cb)(struct ceph_mds_session *),
				bool check_state)
{
	int mds;

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ++mds) {
		struct ceph_mds_session *s;

		s = __ceph_lookup_mds_session(mdsc, mds);
		if (!s)
			continue;

		if (check_state && !check_session_state(s)) {
			ceph_put_mds_session(s);
			continue;
		}

		mutex_unlock(&mdsc->mutex);
		cb(s);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_mdsc_release_dir_caps_async(req);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent) {
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
		iput(req->r_parent);
	}
	iput(req->r_target_inode);
	iput(req->r_new_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_cred(req->r_cred);
	if (req->r_mnt_idmap)
		mnt_idmap_put(req->r_mnt_idmap);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	kfree(req->r_fscrypt_auth);
	kfree(req->r_altname);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kmem_cache_free(ceph_mds_request_cachep, req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
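
/*
 * DEFINE_RB_FUNCS() generates the lookup_request(), insert_request()
 * and erase_request() helpers used below, keyed by r_tid and linked
 * through r_node into mdsc->request_tree.
 */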

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid. Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	struct ceph_client *cl = mdsc->fsc->client;
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err_client(cl, "%p failed to reserve caps: %d\n",
				      req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	doutc(cl, "%p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_cred = get_current_cred();
	if (!req->r_mnt_idmap)
		req->r_mnt_idmap = &nop_mnt_idmap;

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		req->r_unsafe_dir = dir;
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}

/*
 * Choose mds to send request to next. If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds. If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req,
			bool *random)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
	struct ceph_client *cl = mdsc->fsc->client;

	if (random)
		*random = false;

	/*
	 * is there a specific mds we should try? ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			doutc(cl, "using snapdir's parent %p %llx.%llx\n",
			      inode, ceph_vinop(inode));
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = READ_ONCE(req->r_dentry->d_parent);
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			doutc(cl, "using nonsnap parent %p %llx.%llx\n",
			      inode, ceph_vinop(inode));
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	if (!inode)
		goto random;

	doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode,
	      ceph_vinop(inode), (int)is_hash, hash, mode);
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n",
				      inode, ceph_vinop(inode), frag.frag,
				      mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE &&
				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n",
				      inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE) {
					if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
								  mds))
						goto out;
				}
			}
			mode = USE_AUTH_MDS;
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode,
	      ceph_vinop(inode), mds,
	      cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	if (random)
		*random = true;

	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	doutc(cl, "chose random mds%d\n", mds);
	return mds;
}


/*
 * session messages
 */
struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("ENOMEM creating session %s msg\n",
		       ceph_session_op_name(op));
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
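
/*
 * feature_bits[] holds the individual feature bit numbers the client
 * supports; FEATURE_BYTES() sizes the on-wire bitmap from the last
 * (highest) listed bit, rounded up to a whole number of 64-bit words.
 */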
static int encode_supported_features(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(feature_bits);

	if (count > 0) {
		size_t i;
		size_t size = FEATURE_BYTES(count);
		unsigned long bit;

		if (WARN_ON_ONCE(*p + 4 + size > end))
			return -ERANGE;

		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++) {
			bit = feature_bits[i];
			((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
		}
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 > end))
			return -ERANGE;

		ceph_encode_32(p, 0);
	}

	return 0;
}

static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
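
/*
 * METRIC_BYTES() mirrors FEATURE_BYTES() above: size the metric
 * bitmap from the last listed metric bit, rounded up to 64-bit words.
 */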
static int encode_metric_spec(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(metric_bits);

	/* header */
	if (WARN_ON_ONCE(*p + 2 > end))
		return -ERANGE;

	ceph_encode_8(p, 1); /* version */
	ceph_encode_8(p, 1); /* compat */

	if (count > 0) {
		size_t i;
		size_t size = METRIC_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4 + size);

		/* metric spec */
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 + 4 > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4);
		/* metric spec */
		ceph_encode_32(p, 0);
	}

	return 0;
}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *
create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	struct ceph_client *cl = mdsc->fsc->client;
	size_t size, count;
	void *p, *end;
	int ret;

	const char* metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* supported feature */
	size = 0;
	count = ARRAY_SIZE(feature_bits);
	if (count > 0)
		size = FEATURE_BYTES(count);
	extra_bytes += 4 + size;

	/* metric spec */
	size = 0;
	count = ARRAY_SIZE(metric_bits);
	if (count > 0)
		size = METRIC_BYTES(count);
	extra_bytes += 2 + 4 + 4 + size;

	/* flags, mds auth caps and oldest_client_tid */
	extra_bytes += 4 + 4 + 8;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err_client(cl, "ENOMEM creating session open msg\n");
		return ERR_PTR(-ENOMEM);
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v7
	 */
	msg->hdr.version = cpu_to_le16(7);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	ret = encode_supported_features(&p, end);
	if (ret) {
		pr_err_client(cl, "encode_supported_features failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	ret = encode_metric_spec(&p, end);
	if (ret) {
		pr_err_client(cl, "encode_metric_spec failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	/* version == 5, flags */
	ceph_encode_32(&p, 0);

	/* version == 6, mds auth caps */
	ceph_encode_32(&p, 0);

	/* version == 7, oldest_client_tid */
	ceph_encode_64(&p, mdsc->oldest_tid);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
1655 | |
1656 | /* |
1657 | * send session open request. |
1658 | * |
1659 | * called under mdsc->mutex |
1660 | */ |
1661 | static int __open_session(struct ceph_mds_client *mdsc, |
1662 | struct ceph_mds_session *session) |
1663 | { |
1664 | struct ceph_msg *msg; |
1665 | int mstate; |
1666 | int mds = session->s_mds; |
1667 | |
1668 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) |
1669 | return -EIO; |
1670 | |
1671 | /* wait for mds to go active? */ |
1672 | mstate = ceph_mdsmap_get_state(m: mdsc->mdsmap, w: mds); |
1673 | doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n" , mds, |
1674 | ceph_mds_state_name(mstate)); |
1675 | session->s_state = CEPH_MDS_SESSION_OPENING; |
1676 | session->s_renew_requested = jiffies; |
1677 | |
1678 | /* send connect message */ |
1679 | msg = create_session_full_msg(mdsc, op: CEPH_SESSION_REQUEST_OPEN, |
1680 | seq: session->s_seq); |
1681 | if (IS_ERR(ptr: msg)) |
1682 | return PTR_ERR(ptr: msg); |
1683 | ceph_con_send(con: &session->s_con, msg); |
1684 | return 0; |
1685 | } |
1686 | |
1687 | /* |
1688 | * open sessions for any export targets for the given mds |
1689 | * |
1690 | * called under mdsc->mutex |
1691 | */ |
1692 | static struct ceph_mds_session * |
1693 | __open_export_target_session(struct ceph_mds_client *mdsc, int target) |
1694 | { |
1695 | struct ceph_mds_session *session; |
1696 | int ret; |
1697 | |
1698 | session = __ceph_lookup_mds_session(mdsc, mds: target); |
1699 | if (!session) { |
1700 | session = register_session(mdsc, mds: target); |
1701 | if (IS_ERR(ptr: session)) |
1702 | return session; |
1703 | } |
1704 | if (session->s_state == CEPH_MDS_SESSION_NEW || |
1705 | session->s_state == CEPH_MDS_SESSION_CLOSING) { |
1706 | ret = __open_session(mdsc, session); |
1707 | if (ret) |
1708 | return ERR_PTR(error: ret); |
1709 | } |
1710 | |
1711 | return session; |
1712 | } |
1713 | |
1714 | struct ceph_mds_session * |
1715 | ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) |
1716 | { |
1717 | struct ceph_mds_session *session; |
1718 | struct ceph_client *cl = mdsc->fsc->client; |
1719 | |
1720 | doutc(cl, "to mds%d\n" , target); |
1721 | |
1722 | mutex_lock(&mdsc->mutex); |
1723 | session = __open_export_target_session(mdsc, target); |
1724 | mutex_unlock(lock: &mdsc->mutex); |
1725 | |
1726 | return session; |
1727 | } |
1728 | |
1729 | static void __open_export_target_sessions(struct ceph_mds_client *mdsc, |
1730 | struct ceph_mds_session *session) |
1731 | { |
1732 | struct ceph_mds_info *mi; |
1733 | struct ceph_mds_session *ts; |
1734 | int i, mds = session->s_mds; |
1735 | struct ceph_client *cl = mdsc->fsc->client; |
1736 | |
1737 | if (mds >= mdsc->mdsmap->possible_max_rank) |
1738 | return; |
1739 | |
1740 | mi = &mdsc->mdsmap->m_info[mds]; |
1741 | doutc(cl, "for mds%d (%d targets)\n" , session->s_mds, |
1742 | mi->num_export_targets); |
1743 | |
1744 | for (i = 0; i < mi->num_export_targets; i++) { |
1745 | ts = __open_export_target_session(mdsc, target: mi->export_targets[i]); |
1746 | ceph_put_mds_session(s: ts); |
1747 | } |
1748 | } |
1749 | |
1750 | void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, |
1751 | struct ceph_mds_session *session) |
1752 | { |
1753 | mutex_lock(&mdsc->mutex); |
1754 | __open_export_target_sessions(mdsc, session); |
1755 | mutex_unlock(lock: &mdsc->mutex); |
1756 | } |
1757 | |
1758 | /* |
1759 | * session caps |
1760 | */ |
1761 | |
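/*
 * Detach all queued cap releases from the session so they can be
 * disposed of after the lock is dropped; caller must hold s_cap_lock
 * (asserted below).
 */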
static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	struct ceph_client *cl = session->s_mdsc->fsc->client;

	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	doutc(cl, "mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

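/*
 * Drop any unsafe requests still pinned to this session and zero the
 * retry count on requests that targeted it, so a later kick_requests()
 * re-sends them from scratch.
 */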
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_mds_request *req;
	struct rb_node *p;

	doutc(cl, "mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n",
					   req->r_tid);
		if (req->r_target_inode)
			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
		if (req->r_unsafe_dir)
			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
int ceph_iterate_session_caps(struct ceph_mds_session *session,
			      int (*cb)(struct inode *, int mds, void *),
			      void *arg)
{
	struct ceph_client *cl = session->s_mdsc->fsc->client;
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	doutc(cl, "%p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		int mds;

		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->netfs.inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		mds = cap->mds;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, mds, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			doutc(cl, "finishing cap %p removal\n", cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			atomic64_dec(&session->s_mdsc->metric.total_caps);
			if (cap->queue_release)
				__ceph_queue_cap_release(session, cap);
			else
				old_cap = cap; /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

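/*
 * Per-inode callback for remove_session_caps(): purge this session's
 * cap on the inode and wake anyone waiting for it.
 */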
static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	bool invalidate = false;
	struct ceph_cap *cap;
	int iputs = 0;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (cap) {
		doutc(cl, " removing cap %p, ci is %p, inode is %p\n",
		      cap, ci, &ci->netfs.inode);

		iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (cap)
		wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	while (iputs--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	doutc(fsc->client, "on %p\n", session);
	ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, so we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job, but it
		 * is not exported, so use the inode lookup function to get
		 * at it instead.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	/* detach remaining cap releases; s_cap_lock is dropped just below */
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}

enum {
	RECONNECT,
	RENEWCAPS,
	FORCE_RO,
};

/*
 * wake up any threads waiting on this session's caps. if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned long ev = (unsigned long)arg;

	if (ev == RECONNECT) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	} else if (ev == RENEWCAPS) {
		struct ceph_cap *cap;

		spin_lock(&ci->i_ceph_lock);
		cap = __get_cap_for_mds(ci, mds);
		/* mds did not re-issue stale cap */
		if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
			cap->issued = cap->implemented = CEPH_CAP_PIN;
		spin_unlock(&ci->i_ceph_lock);
	} else if (ev == FORCE_RO) {
		/* nothing per-inode to fix up; just wake waiters below */
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
	struct ceph_client *cl = session->s_mdsc->fsc->client;

	doutc(cl, "session %p mds%d\n", session, session->s_mds);
	ceph_iterate_session_caps(session, wake_up_session_cb,
				  (void *)(unsigned long)ev);
}

/*
 * Send periodic message to MDS renewing all currently held caps. The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info_client(cl, "mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		doutc(cl, "ignoring mds%d (%s)\n", session->s_mds,
		      ceph_mds_state_name(state));
		return 0;
	}

	doutc(cl, "to mds%d (%s)\n", session->s_mds,
	      ceph_mds_state_name(state));
	msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_RENEWCAPS,
				      ++session->s_renew_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg;

	doutc(cl, "to mds%d (%s) seq %lld\n", session->s_mds,
	      ceph_session_state_name(session->s_state), seq);
	msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	struct ceph_client *cl = mdsc->fsc->client;
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info_client(cl, "mds%d caps renewed\n",
				       session->s_mds);
			wake = 1;
		} else {
			pr_info_client(cl, "mds%d caps still stale\n",
				       session->s_mds);
		}
	}
	doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds,
	      session->s_cap_ttl, was_stale ? "stale" : "fresh",
	      time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, RENEWCAPS);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_session *session)
{
	struct ceph_client *cl = session->s_mdsc->fsc->client;
	struct ceph_msg *msg;

	doutc(cl, "mds%d state %s seq %lld\n", session->s_mds,
	      ceph_session_state_name(session->s_state), session->s_seq);
	msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
				      session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(session);
}

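/*
 * Returns true unless the dentry is a directory with at least one
 * positive child; negative children are shrunk away as a side effect.
 * Helper for trim_caps_cb() below.
 */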
static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	hlist_for_each_entry(child, &dentry->d_children, d_sib) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy. Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, int mds, void *arg)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_client *cl = mdsc->fsc->client;
	int *remaining = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;
	struct ceph_cap *cap;

	if (*remaining <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		return 0;
	}
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n",
	      inode, ceph_vinop(inode), cap, ceph_cap_string(mine),
	      ceph_cap_string(oissued), ceph_cap_string(used),
	      ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
		/* Note: it's possible that i_filelock_ref becomes non-zero
		 * after dropping auth caps. It doesn't hurt because the reply
		 * to the lock mds request will re-add auth caps. */
		if (atomic_read(&ci->i_filelock_ref) > 0)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (S_ISREG(inode->i_mode) &&
	    wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out; /* we need these caps */

	if (oissued) {
		/* we aren't the only cap.. just remove us */
		ceph_remove_cap(mdsc, cap, true);
		(*remaining)--;
	} else {
		struct dentry *dentry;
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		dentry = d_find_any_alias(inode);
		if (dentry && drop_negative_children(dentry)) {
			int count;
			dput(dentry);
			d_prune_aliases(inode);
			count = atomic_read(&inode->i_count);
			if (count == 1)
				(*remaining)--;
			doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
			      inode, ceph_vinop(inode), cap, count);
		} else {
			dput(dentry);
		}
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
int ceph_trim_caps(struct ceph_mds_client *mdsc,
		   struct ceph_mds_session *session,
		   int max_caps)
{
	struct ceph_client *cl = mdsc->fsc->client;
	int trim_caps = session->s_nr_caps - max_caps;

	doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds,
	      session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		int remaining = trim_caps;

		ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
		doutc(cl, "mds%d done: %d / %d, trimmed %d\n",
		      session->s_mds, session->s_nr_caps, max_caps,
		      trim_caps - remaining);
	}

	ceph_flush_cap_releases(mdsc, session);
	return 0;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	struct ceph_client *cl = mdsc->fsc->client;
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			doutc(cl, "still flushing tid %llu <= %llu\n",
			      cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns when we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	struct ceph_client *cl = mdsc->fsc->client;

	doutc(cl, "want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	doutc(cl, "ok, flushed thru %llu\n", want_flush_tid);
}

/*
 * called under s_mutex
 */
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;
	__le32 barrier, *cap_barrier;

	down_read(&osdc->lock);
	barrier = cpu_to_le32(osdc->epoch_barrier);
	up_read(&osdc->lock);

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);

			msg->hdr.version = cpu_to_le16(2);
			msg->hdr.compat_version = cpu_to_le16(1);
		}

		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
				   &head->num);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			// Append cap_barrier field
			cap_barrier = msg->front.iov_base + msg->front.iov_len;
			*cap_barrier = barrier;
			msg->front.iov_len += sizeof(*cap_barrier);

			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			doutc(cl, "mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		// Append cap_barrier field
		cap_barrier = msg->front.iov_base + msg->front.iov_len;
		*cap_barrier = barrier;
		msg->front.iov_len += sizeof(*cap_barrier);

		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		doutc(cl, "mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err_client(cl, "mds%d, failed to allocate message\n",
		      session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}

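/* Workqueue callback: send out queued cap releases while the session is usable. */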
static void ceph_cap_release_work(struct work_struct *work)
{
	struct ceph_mds_session *session =
		container_of(work, struct ceph_mds_session, s_cap_release_work);

	mutex_lock(&session->s_mutex);
	if (session->s_state == CEPH_MDS_SESSION_OPEN ||
	    session->s_state == CEPH_MDS_SESSION_HUNG)
		ceph_send_cap_releases(session->s_mdsc, session);
	mutex_unlock(&session->s_mutex);
	ceph_put_mds_session(session);
}

void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_client *cl = mdsc->fsc->client;

	if (mdsc->stopping)
		return;

	ceph_get_mds_session(session);
	if (queue_work(mdsc->fsc->cap_wq,
		       &session->s_cap_release_work)) {
		doutc(cl, "cap release work queued\n");
	} else {
		ceph_put_mds_session(session);
		doutc(cl, "failed to queue cap release work\n");
	}
}

/*
 * caller holds session->s_cap_lock
 */
void __ceph_queue_cap_release(struct ceph_mds_session *session,
			      struct ceph_cap *cap)
{
	list_add_tail(&cap->session_caps, &session->s_cap_releases);
	session->s_num_cap_releases++;

	if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
		ceph_flush_cap_releases(session->s_mdsc, session);
}

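/* Workqueue callback: trim dentries, requeueing itself if more remain. */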
static void ceph_cap_reclaim_work(struct work_struct *work)
{
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, cap_reclaim_work);
	int ret = ceph_trim_dentries(mdsc);

	if (ret == -EAGAIN)
		ceph_queue_cap_reclaim_work(mdsc);
}

void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
	struct ceph_client *cl = mdsc->fsc->client;

	if (mdsc->stopping)
		return;

	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
		doutc(cl, "caps reclaim work queued\n");
	} else {
		doutc(cl, "failed to queue caps reclaim work\n");
	}
}

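/*
 * Note that nr more caps became reclaimable; kick the reclaim worker
 * once roughly every CEPH_CAPS_PER_RELEASE of them have accumulated.
 */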
void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
{
	int val;

	if (!nr)
		return;
	val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
	if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
		atomic_set(&mdsc->cap_reclaim_pending, 0);
		ceph_queue_cap_reclaim_work(mdsc);
	}
}

void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
{
	struct ceph_client *cl = mdsc->fsc->client;

	if (mdsc->stopping)
		return;

	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) {
		doutc(cl, "caps unlink work queued\n");
	} else {
		doutc(cl, "failed to queue caps unlink work\n");
	}
}

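/*
 * Workqueue callback: flush caps on inodes queued for delayed handling
 * at unlink time. cap_delay_lock is dropped around each
 * ceph_check_caps() call, since it may block.
 */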
static void ceph_cap_unlink_work(struct work_struct *work)
{
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, cap_unlink_work);
	struct ceph_client *cl = mdsc->fsc->client;

	doutc(cl, "begin\n");
	spin_lock(&mdsc->cap_delay_lock);
	while (!list_empty(&mdsc->cap_unlink_delay_list)) {
		struct ceph_inode_info *ci;
		struct inode *inode;

		ci = list_first_entry(&mdsc->cap_unlink_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		list_del_init(&ci->i_cap_delay_list);

		inode = igrab(&ci->netfs.inode);
		if (inode) {
			spin_unlock(&mdsc->cap_delay_lock);
			doutc(cl, "on %p %llx.%llx\n", inode,
			      ceph_vinop(inode));
			ceph_check_caps(ci, CHECK_CAPS_FLUSH);
			iput(inode);
			spin_lock(&mdsc->cap_delay_lock);
		}
	}
	spin_unlock(&mdsc->cap_delay_lock);
	doutc(cl, "done\n");
}

/*
 * requests
 */

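/*
 * Size the readdir reply buffer from the directory's current file and
 * subdir counts, clamped by the max_readdir mount option, retrying with
 * smaller allocation orders if memory is tight.
 */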
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	unsigned int num_entries;
	int order;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1U);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN |
							     __GFP_ZERO,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req;

	req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_start_latency = ktime_get();
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	req->r_feature_needed = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	ktime_get_coarse_real_ts64(&req->r_stamp);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}

#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
	struct inode *dir = req->r_parent;
	struct dentry *dentry = req->r_dentry;
	u8 *cryptbuf = NULL;
	u32 len = 0;
	int ret = 0;

	/* only encode if we have parent and dentry */
	if (!dir || !dentry)
		goto success;

	/* No-op unless this is encrypted */
	if (!IS_ENCRYPTED(dir))
		goto success;

	ret = ceph_fscrypt_prepare_readdir(dir);
	if (ret < 0)
		return ERR_PTR(ret);

	/* No key? Just ignore it. */
	if (!fscrypt_has_encryption_key(dir))
		goto success;

	if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX,
					  &len)) {
		WARN_ON_ONCE(1);
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* No need to append altname if name is short enough */
	if (len <= CEPH_NOHASH_NAME_MAX) {
		len = 0;
		goto success;
	}

	cryptbuf = kmalloc(len, GFP_KERNEL);
	if (!cryptbuf)
		return ERR_PTR(-ENOMEM);

	ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len);
	if (ret) {
		kfree(cryptbuf);
		return ERR_PTR(ret);
	}
success:
	*plen = len;
	return cryptbuf;
}
#else
static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
	*plen = 0;
	return NULL;
}
#endif

/**
 * ceph_mdsc_build_path - build a path string to a given dentry
 * @mdsc: mds client
 * @dentry: dentry to which path should be built
 * @plen: returned length of string
 * @pbase: returned base inode number
 * @for_wire: is this path going to be sent to the MDS?
 *
 * Build a string that represents the path to the dentry. This is mostly called
 * for two different purposes:
 *
 * 1) we need to build a path string to send to the MDS (for_wire == true)
 * 2) we need a path string for local presentation (e.g. debugfs)
 *    (for_wire == false)
 *
 * The path is built in reverse, starting with the dentry. Walk back up toward
 * the root, building the path until the first non-snapped inode is reached
 * (for_wire) or the root inode is reached (!for_wire).
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
			   int *plen, u64 *pbase, int for_wire)
{
	struct ceph_client *cl = mdsc->fsc->client;
	struct dentry *cur;
	struct inode *inode;
	char *path;
	int pos;
	unsigned seq;
	u64 base;

	if (!dentry)
		return ERR_PTR(-EINVAL);

	path = __getname();
	if (!path)
		return ERR_PTR(-ENOMEM);
retry:
	pos = PATH_MAX - 1;
	path[pos] = '\0';

	seq = read_seqbegin(&rename_lock);
	cur = dget(dentry);
	for (;;) {
		struct dentry *parent;

		spin_lock(&cur->d_lock);
		inode = d_inode(cur);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
			spin_unlock(&cur->d_lock);
			parent = dget_parent(cur);
		} else if (for_wire && inode && dentry != cur &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&cur->d_lock);
			pos++; /* get rid of any prepended '/' */
			break;
		} else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) {
			pos -= cur->d_name.len;
			if (pos < 0) {
				spin_unlock(&cur->d_lock);
				break;
			}
			memcpy(path + pos, cur->d_name.name, cur->d_name.len);
			spin_unlock(&cur->d_lock);
			parent = dget_parent(cur);
		} else {
			int len, ret;
			char buf[NAME_MAX];

			/*
			 * Proactively copy name into buf, in case we need to
			 * present it as-is.
			 */
			memcpy(buf, cur->d_name.name, cur->d_name.len);
			len = cur->d_name.len;
			spin_unlock(&cur->d_lock);
			parent = dget_parent(cur);

			ret = ceph_fscrypt_prepare_readdir(d_inode(parent));
			if (ret < 0) {
				dput(parent);
				dput(cur);
				return ERR_PTR(ret);
			}

			if (fscrypt_has_encryption_key(d_inode(parent))) {
				len = ceph_encode_encrypted_fname(d_inode(parent),
								  cur, buf);
				if (len < 0) {
					dput(parent);
					dput(cur);
					return ERR_PTR(len);
				}
			}
			pos -= len;
			if (pos < 0) {
				dput(parent);
				break;
			}
			memcpy(path + pos, buf, len);
		}
		dput(cur);
		cur = parent;

		/* Are we at the root? */
		if (IS_ROOT(cur))
			break;

		/* Are we out of buffer? */
		if (--pos < 0)
			break;

		path[pos] = '/';
	}
	inode = d_inode(cur);
	base = inode ? ceph_ino(inode) : 0;
	dput(cur);

	if (read_seqretry(&rename_lock, seq))
		goto retry;

	if (pos < 0) {
		/*
		 * A rename didn't occur, but somehow we didn't end up where
		 * we thought we would. Throw a warning and try again.
		 */
		pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
			       pos);
		goto retry;
	}

	*pbase = base;
	*plen = PATH_MAX - 1 - pos;
	doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
	      base, *plen, path + pos);
	return path + pos;
}

static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
			     struct inode *dir, const char **ppath, int *ppathlen,
			     u64 *pino, bool *pfreepath, bool parent_locked)
{
	char *path;

	rcu_read_lock();
	if (!dir)
		dir = d_inode_rcu(dentry->d_parent);
	if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
	    !IS_ENCRYPTED(dir)) {
		*pino = ceph_ino(dir);
		rcu_read_unlock();
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	rcu_read_unlock();
	path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    bool *pfreepath)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
				 struct dentry *rdentry, struct inode *rdiri,
				 const char *rpath, u64 rino, const char **ppath,
				 int *pathlen, u64 *ino, bool *freepath,
				 bool parent_locked)
{
	struct ceph_client *cl = mdsc->fsc->client;
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		      ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
				      freepath, parent_locked);
		doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		doutc(cl, " path %.*s\n", *pathlen, rpath);
	}

	return r;
}

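/*
 * Encode the variable-length tail of an MClientRequest: timestamp, gid
 * list (v4), alternate name (v5) and the fscrypt auth/file blobs (v6,
 * encoded as zero-length when unused).
 */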
static void encode_mclientrequest_tail(void **p,
				       const struct ceph_mds_request *req)
{
	struct ceph_timespec ts;
	int i;

	ceph_encode_timespec64(&ts, &req->r_stamp);
	ceph_encode_copy(p, &ts, sizeof(ts));

	/* v4: gid_list */
	ceph_encode_32(p, req->r_cred->group_info->ngroups);
	for (i = 0; i < req->r_cred->group_info->ngroups; i++)
		ceph_encode_64(p, from_kgid(&init_user_ns,
					    req->r_cred->group_info->gid[i]));

	/* v5: altname */
	ceph_encode_32(p, req->r_altname_len);
	ceph_encode_copy(p, req->r_altname, req->r_altname_len);

	/* v6: fscrypt_auth and fscrypt_file */
	if (req->r_fscrypt_auth) {
		u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth);

		ceph_encode_32(p, authlen);
		ceph_encode_copy(p, req->r_fscrypt_auth, authlen);
	} else {
		ceph_encode_32(p, 0);
	}
	if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) {
		ceph_encode_32(p, sizeof(__le64));
		ceph_encode_64(p, req->r_fscrypt_file);
	} else {
		ceph_encode_32(p, 0);
	}
}

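/*
 * Pick the newest request head version this session's MDS can decode,
 * based on the feature bits it advertised.
 */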
static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
{
	if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
		return 1;

	if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
		return 2;

	return CEPH_MDS_REQUEST_HEAD_VERSION;
}

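/*
 * The legacy head layout starts at the oldest_client_tid field of the
 * versioned heads, so the legacy fields can be addressed uniformly
 * regardless of which head format the peer speaks.
 */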
static struct ceph_mds_request_head_legacy *
find_legacy_request_head(void *p, u64 features)
{
	bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
	struct ceph_mds_request_head_old *ohead;

	if (legacy)
		return (struct ceph_mds_request_head_legacy *)p;
	ohead = (struct ceph_mds_request_head_old *)p;
	return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
					       struct ceph_mds_request *req,
					       bool drop_cap_releases)
{
	int mds = session->s_mds;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg;
	struct ceph_mds_request_head_legacy *lhead;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	bool freepath1 = false, freepath2 = false;
	struct dentry *old_dentry = NULL;
	int len;
	u16 releases;
	void *p, *end;
	int ret;
	bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
	u16 request_head_version = mds_supported_head_version(session);
	kuid_t caller_fsuid = req->r_cred->fsuid;
	kgid_t caller_fsgid = req->r_cred->fsgid;

	ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
				    req->r_parent, req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1,
				    test_bit(CEPH_MDS_R_PARENT_LOCKED,
					     &req->r_req_flags));
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	/* If r_old_dentry is set, then assume that its parent is locked */
	if (req->r_old_dentry &&
	    !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
		old_dentry = req->r_old_dentry;
	ret = set_request_path_attr(mdsc, NULL, old_dentry,
				    req->r_old_dentry_dir,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2, true);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
	if (IS_ERR(req->r_altname)) {
		msg = ERR_CAST(req->r_altname);
		req->r_altname = NULL;
		goto out_free2;
	}

	/*
	 * Old MDS versions that lack the 32-bit retry/fwd feature copy the
	 * raw head memory directly when decoding requests, while newer ones
	 * decode the head according to its version member, so the encoding
	 * must stay compatible with both.
	 */
	if (legacy)
		len = sizeof(struct ceph_mds_request_head_legacy);
	else if (request_head_version == 1)
		len = sizeof(struct ceph_mds_request_head_old);
	else if (request_head_version == 2)
		len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
	else
		len = sizeof(struct ceph_mds_request_head);

	/* filepaths */
	len += 2 * (1 + sizeof(u32) + sizeof(u64));
	len += pathlen1 + pathlen2;

	/* cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);

	if (req->r_dentry_drop)
		len += pathlen1;
	if (req->r_old_dentry_drop)
		len += pathlen2;

	/* MClientRequest tail */

	/* req->r_stamp */
	len += sizeof(struct ceph_timespec);

	/* gid list */
	len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);

	/* alternate name */
	len += sizeof(u32) + req->r_altname_len;

	/* fscrypt_auth */
	len += sizeof(u32); // fscrypt_auth
	if (req->r_fscrypt_auth)
		len += ceph_fscrypt_auth_len(req->r_fscrypt_auth);

	/* fscrypt_file */
	len += sizeof(u32);
	if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags))
		len += sizeof(__le64);

	msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.tid = cpu_to_le64(req->r_tid);

	lhead = find_legacy_request_head(msg->front.iov_base,
					 session->s_con.peer_features);

	if ((req->r_mnt_idmap != &nop_mnt_idmap) &&
	    !test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) {
		WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op));

		if (enable_unsafe_idmap) {
			pr_warn_once_client(cl,
				"idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
				" is not supported by MDS. UID/GID-based restrictions may"
				" not work properly.\n");

			caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
						   VFSUIDT_INIT(req->r_cred->fsuid));
			caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
						   VFSGIDT_INIT(req->r_cred->fsgid));
		} else {
			pr_err_ratelimited_client(cl,
				"idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
				" is not supported by MDS. Fail request with -EIO.\n");

			ret = -EIO;
			goto out_err;
		}
	}

	/*
	 * The ceph_mds_request_head_legacy didn't contain a version field, and
	 * one was added when we moved the message version from 3->4.
	 */
	if (legacy) {
		msg->hdr.version = cpu_to_le16(3);
		p = msg->front.iov_base + sizeof(*lhead);
	} else if (request_head_version == 1) {
		struct ceph_mds_request_head_old *ohead = msg->front.iov_base;

		msg->hdr.version = cpu_to_le16(4);
		ohead->version = cpu_to_le16(1);
		p = msg->front.iov_base + sizeof(*ohead);
	} else if (request_head_version == 2) {
		struct ceph_mds_request_head *nhead = msg->front.iov_base;

		msg->hdr.version = cpu_to_le16(6);
		nhead->version = cpu_to_le16(2);

		p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd);
	} else {
		struct ceph_mds_request_head *nhead = msg->front.iov_base;
		kuid_t owner_fsuid;
		kgid_t owner_fsgid;

		msg->hdr.version = cpu_to_le16(6);
		nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
		nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head));

		if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
			owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
						  VFSUIDT_INIT(req->r_cred->fsuid));
			owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
						  VFSGIDT_INIT(req->r_cred->fsgid));
			nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
			nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
		} else {
			nhead->owner_uid = cpu_to_le32(-1);
			nhead->owner_gid = cpu_to_le32(-1);
		}

		p = msg->front.iov_base + sizeof(*nhead);
	}

	end = msg->front.iov_base + msg->front.iov_len;

	lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	lhead->op = cpu_to_le32(req->r_op);
	lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
						  caller_fsuid));
	lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
						  caller_fsgid));
	lhead->ino = cpu_to_le64(req->r_deleg_ino);
	lhead->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
			req->r_inode ? req->r_inode : d_inode(req->r_dentry),
			mds, req->r_inode_drop, req->r_inode_unless,
			req->r_op == CEPH_MDS_OP_READDIR);
	if (req->r_dentry_drop) {
		ret = ceph_encode_dentry_release(&p, req->r_dentry,
				req->r_parent, mds, req->r_dentry_drop,
				req->r_dentry_unless);
		if (ret < 0)
			goto out_err;
		releases += ret;
	}
	if (req->r_old_dentry_drop) {
		ret = ceph_encode_dentry_release(&p, req->r_old_dentry,
				req->r_old_dentry_dir, mds,
				req->r_old_dentry_drop,
				req->r_old_dentry_unless);
		if (ret < 0)
			goto out_err;
		releases += ret;
	}
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
			d_inode(req->r_old_dentry),
			mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	lhead->num_releases = cpu_to_le16(releases);

	encode_mclientrequest_tail(&p, req);

	if (WARN_ON_ONCE(p > end)) {
		ceph_msg_put(msg);
		msg = ERR_PTR(-ERANGE);
		goto out_free2;
	}

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		ceph_mdsc_free_path((char *)path2, pathlen2);
out_free1:
	if (freepath1)
		ceph_mdsc_free_path((char *)path1, pathlen1);
out:
	return msg;
out_err:
	ceph_msg_put(msg);
	msg = ERR_PTR(ret);
	goto out_free2;
}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	req->r_end_latency = ktime_get();

	if (req->r_callback)
		req->r_callback(mdsc, req);
	complete_all(&req->r_completion);
}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_session *session,
				  struct ceph_mds_request *req,
				  bool drop_cap_releases)
{
	int mds = session->s_mds;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_mds_request_head_legacy *lhead;
	struct ceph_mds_request_head *nhead;
	struct ceph_msg *msg;
	int flags = 0, old_max_retry;
	bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
				     &session->s_features);

	/*
	 * Avoid infinite retrying after overflow. The client increases
	 * the retry count on each attempt; if the MDS is an old version
	 * the on-wire counter is a single byte, so limit retries to at
	 * most 256 in that case.
	 */
	if (req->r_attempts) {
		old_max_retry = sizeof_field(struct ceph_mds_request_head_old,
					     num_retry);
		old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
		if ((old_version && req->r_attempts >= old_max_retry) ||
		    ((uint32_t)req->r_attempts >= U32_MAX)) {
			pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n",
						   req->r_tid);
			return -EMULTIHOP;
		}
	}

3288 | req->r_attempts++; |
3289 | if (req->r_inode) { |
3290 | struct ceph_cap *cap = |
3291 | ceph_get_cap_for_mds(ci: ceph_inode(inode: req->r_inode), mds); |
3292 | |
3293 | if (cap) |
3294 | req->r_sent_on_mseq = cap->mseq; |
3295 | else |
3296 | req->r_sent_on_mseq = -1; |
3297 | } |
3298 | doutc(cl, "%p tid %lld %s (attempt %d)\n" , req, req->r_tid, |
3299 | ceph_mds_op_name(req->r_op), req->r_attempts); |
3300 | |
3301 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
3302 | void *p; |
3303 | |
3304 | /* |
3305 | * Replay. Do not regenerate message (and rebuild |
3306 | * paths, etc.); just use the original message. |
3307 | * Rebuilding paths will break for renames because |
3308 | * d_move mangles the src name. |
3309 | */ |
3310 | msg = req->r_request; |
3311 | lhead = find_legacy_request_head(p: msg->front.iov_base, |
3312 | features: session->s_con.peer_features); |
3313 | |
3314 | flags = le32_to_cpu(lhead->flags); |
3315 | flags |= CEPH_MDS_FLAG_REPLAY; |
3316 | lhead->flags = cpu_to_le32(flags); |
3317 | |
3318 | if (req->r_target_inode) |
3319 | lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); |
3320 | |
3321 | lhead->num_retry = req->r_attempts - 1; |
3322 | if (!old_version) { |
3323 | nhead = (struct ceph_mds_request_head*)msg->front.iov_base; |
3324 | nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1); |
3325 | } |
3326 | |
3327 | /* remove cap/dentry releases from message */ |
3328 | lhead->num_releases = 0; |
3329 | |
3330 | p = msg->front.iov_base + req->r_request_release_offset; |
3331 | encode_mclientrequest_tail(p: &p, req); |
3332 | |
3333 | msg->front.iov_len = p - msg->front.iov_base; |
3334 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
3335 | return 0; |
3336 | } |
3337 | |
	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(session, req, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	lhead = find_legacy_request_head(msg->front.iov_base,
					 session->s_con.peer_features);
3351 | lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
3352 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
3353 | flags |= CEPH_MDS_FLAG_REPLAY; |
3354 | if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) |
3355 | flags |= CEPH_MDS_FLAG_ASYNC; |
3356 | if (req->r_parent) |
3357 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; |
3358 | lhead->flags = cpu_to_le32(flags); |
3359 | lhead->num_fwd = req->r_num_fwd; |
3360 | lhead->num_retry = req->r_attempts - 1; |
3361 | if (!old_version) { |
3362 | nhead = (struct ceph_mds_request_head*)msg->front.iov_base; |
3363 | nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd); |
3364 | nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1); |
3365 | } |
3366 | |
3367 | doutc(cl, " r_parent = %p\n" , req->r_parent); |
3368 | return 0; |
3369 | } |
3370 | |
3371 | /* |
3372 | * called under mdsc->mutex |
3373 | */ |
3374 | static int __send_request(struct ceph_mds_session *session, |
3375 | struct ceph_mds_request *req, |
3376 | bool drop_cap_releases) |
3377 | { |
3378 | int err; |
3379 | |
3380 | err = __prepare_send_request(session, req, drop_cap_releases); |
3381 | if (!err) { |
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
3384 | } |
3385 | |
3386 | return err; |
3387 | } |
3388 | |
3389 | /* |
3390 | * send request, or put it on the appropriate wait list. |
3391 | */ |
3392 | static void __do_request(struct ceph_mds_client *mdsc, |
3393 | struct ceph_mds_request *req) |
3394 | { |
3395 | struct ceph_client *cl = mdsc->fsc->client; |
3396 | struct ceph_mds_session *session = NULL; |
3397 | int mds = -1; |
3398 | int err = 0; |
3399 | bool random; |
3400 | |
3401 | if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
3402 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) |
3403 | __unregister_request(mdsc, req); |
3404 | return; |
3405 | } |
3406 | |
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
		doutc(cl, "metadata corrupted\n");
		err = -EIO;
		goto finish;
	}
	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		doutc(cl, "timed out\n");
		err = -ETIMEDOUT;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		doutc(cl, "forced umount\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			doutc(cl, "mdsmap err %d\n", err);
			goto finish;
		}
		if (mdsc->mdsmap->m_epoch == 0) {
			doutc(cl, "no mdsmap, waiting for map\n");
			list_add(&req->r_wait, &mdsc->waiting_for_map);
			return;
		}
		if (!(mdsc->fsc->mount_options->flags &
		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
			err = -EHOSTUNREACH;
			goto finish;
3439 | } |
3440 | } |
3441 | |
3442 | put_request_session(req); |
3443 | |
	mds = __choose_mds(mdsc, req, &random);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
			err = -EJUKEBOX;
			goto finish;
		}
		doutc(cl, "no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
3453 | return; |
3454 | } |
3455 | |
3456 | /* get, open session */ |
3457 | session = __ceph_lookup_mds_session(mdsc, mds); |
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = ceph_get_mds_session(session);

	doutc(cl, "mds%d session %p state %s\n", mds, session,
3468 | ceph_session_state_name(session->s_state)); |
3469 | |
3470 | /* |
3471 | * The old ceph will crash the MDSs when see unknown OPs |
3472 | */ |
3473 | if (req->r_feature_needed > 0 && |
3474 | !test_bit(req->r_feature_needed, &session->s_features)) { |
3475 | err = -EOPNOTSUPP; |
3476 | goto out_session; |
3477 | } |
3478 | |
3479 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
3480 | session->s_state != CEPH_MDS_SESSION_HUNG) { |
3481 | /* |
3482 | * We cannot queue async requests since the caps and delegated |
3483 | * inodes are bound to the session. Just return -EJUKEBOX and |
3484 | * let the caller retry a sync request in that case. |
3485 | */ |
3486 | if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) { |
3487 | err = -EJUKEBOX; |
3488 | goto out_session; |
3489 | } |
3490 | |
3491 | /* |
3492 | * If the session has been REJECTED, then return a hard error, |
3493 | * unless it's a CLEANRECOVER mount, in which case we'll queue |
3494 | * it to the mdsc queue. |
3495 | */ |
3496 | if (session->s_state == CEPH_MDS_SESSION_REJECTED) { |
3497 | if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) |
				list_add(&req->r_wait, &mdsc->waiting_for_map);
3499 | else |
3500 | err = -EACCES; |
3501 | goto out_session; |
3502 | } |
3503 | |
3504 | if (session->s_state == CEPH_MDS_SESSION_NEW || |
3505 | session->s_state == CEPH_MDS_SESSION_CLOSING) { |
3506 | err = __open_session(mdsc, session); |
3507 | if (err) |
3508 | goto out_session; |
3509 | /* retry the same mds later */ |
3510 | if (random) |
3511 | req->r_resend_mds = mds; |
3512 | } |
		list_add(&req->r_wait, &session->s_waiting);
3514 | goto out_session; |
3515 | } |
3516 | |
3517 | /* send request */ |
3518 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
3519 | |
3520 | if (req->r_request_started == 0) /* note request start time */ |
3521 | req->r_request_started = jiffies; |
3522 | |
3523 | /* |
3524 | * For async create we will choose the auth MDS of frag in parent |
3525 | * directory to send the request and ususally this works fine, but |
3526 | * if the migrated the dirtory to another MDS before it could handle |
3527 | * it the request will be forwarded. |
3528 | * |
3529 | * And then the auth cap will be changed. |
3530 | */ |
	if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
		struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
		struct ceph_inode_info *ci;
		struct ceph_cap *cap;

		/*
		 * The request may be handled very quickly, before the new
		 * inode has been linked to the dentry. When forwarding the
		 * request we need to wait for ceph_finish_async_create(),
		 * which in theory should neither get stuck for long nor
		 * fail, to finish.
		 */
		if (!d_inode(req->r_dentry)) {
			err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
					  TASK_KILLABLE);
			if (err) {
				mutex_lock(&req->r_fill_mutex);
				set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
				mutex_unlock(&req->r_fill_mutex);
				goto out_session;
			}
		}
3553 | |
		ci = ceph_inode(d_inode(req->r_dentry));

		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
			doutc(cl, "session changed for auth cap %d -> %d\n",
			      cap->session->s_mds, session->s_mds);

			/* Remove the auth cap from old session */
			spin_lock(&cap->session->s_cap_lock);
			cap->session->s_nr_caps--;
			list_del_init(&cap->session_caps);
			spin_unlock(&cap->session->s_cap_lock);

			/* Add the auth cap to the new session */
			cap->mds = mds;
			cap->session = session;
			spin_lock(&session->s_cap_lock);
			session->s_nr_caps++;
			list_add_tail(&cap->session_caps, &session->s_caps);
			spin_unlock(&session->s_cap_lock);

			change_auth_cap_ses(ci, session);
		}
		spin_unlock(&ci->i_ceph_lock);
3579 | } |
3580 | |
	err = __send_request(session, req, false);

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		doutc(cl, "early error %d\n", err);
3588 | req->r_err = err; |
3589 | complete_request(mdsc, req); |
3590 | __unregister_request(mdsc, req); |
3591 | } |
3592 | return; |
3593 | } |
3594 | |
3595 | /* |
3596 | * called under mdsc->mutex |
3597 | */ |
3598 | static void __wake_requests(struct ceph_mds_client *mdsc, |
3599 | struct list_head *head) |
3600 | { |
3601 | struct ceph_client *cl = mdsc->fsc->client; |
3602 | struct ceph_mds_request *req; |
3603 | LIST_HEAD(tmp_list); |
3604 | |
	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		doutc(cl, " wake request %p tid %llu\n", req,
		      req->r_tid);
3613 | __do_request(mdsc, req); |
3614 | } |
3615 | } |
3616 | |
3617 | /* |
3618 | * Wake up threads with requests pending for @mds, so that they can |
3619 | * resubmit their requests to a possibly different mds. |
3620 | */ |
3621 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
3622 | { |
3623 | struct ceph_client *cl = mdsc->fsc->client; |
3624 | struct ceph_mds_request *req; |
3625 | struct rb_node *p = rb_first(&mdsc->request_tree); |
3626 | |
3627 | doutc(cl, "kick_requests mds%d\n" , mds); |
3628 | while (p) { |
3629 | req = rb_entry(p, struct ceph_mds_request, r_node); |
3630 | p = rb_next(p); |
3631 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
3632 | continue; |
3633 | if (req->r_attempts > 0) |
3634 | continue; /* only new requests */ |
3635 | if (req->r_session && |
3636 | req->r_session->s_mds == mds) { |
3637 | doutc(cl, " kicking tid %llu\n" , req->r_tid); |
3638 | list_del_init(entry: &req->r_wait); |
3639 | __do_request(mdsc, req); |
3640 | } |
3641 | } |
3642 | } |
3643 | |
3644 | int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, |
3645 | struct ceph_mds_request *req) |
3646 | { |
3647 | struct ceph_client *cl = mdsc->fsc->client; |
3648 | int err = 0; |
3649 | |
3650 | /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ |
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_parent) {
		struct ceph_inode_info *ci = ceph_inode(req->r_parent);
		int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
			    CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
		spin_lock(&ci->i_ceph_lock);
		ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
	}
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
3665 | |
	if (req->r_inode) {
		err = ceph_wait_on_async_create(req->r_inode);
		if (err) {
			doutc(cl, "wait for async create returned: %d\n", err);
			return err;
		}
	}

	if (!err && req->r_old_inode) {
		err = ceph_wait_on_async_create(req->r_old_inode);
		if (err) {
			doutc(cl, "wait for async create returned: %d\n", err);
			return err;
		}
	}

	doutc(cl, "submit_request on %p for inode %p\n", req, dir);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);
	err = req->r_err;
	mutex_unlock(&mdsc->mutex);
	return err;
3688 | return err; |
3689 | } |
3690 | |
3691 | int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc, |
3692 | struct ceph_mds_request *req, |
3693 | ceph_mds_request_wait_callback_t wait_func) |
3694 | { |
3695 | struct ceph_client *cl = mdsc->fsc->client; |
3696 | int err; |
3697 | |
3698 | /* wait */ |
3699 | doutc(cl, "do_request waiting\n" ); |
3700 | if (wait_func) { |
3701 | err = wait_func(mdsc, req); |
3702 | } else { |
3703 | long timeleft = wait_for_completion_killable_timeout( |
3704 | x: &req->r_completion, |
3705 | timeout: ceph_timeout_jiffies(timeout: req->r_timeout)); |
3706 | if (timeleft > 0) |
3707 | err = 0; |
3708 | else if (!timeleft) |
3709 | err = -ETIMEDOUT; /* timed out */ |
3710 | else |
3711 | err = timeleft; /* killed */ |
3712 | } |
3713 | doutc(cl, "do_request waited, got %d\n" , err); |
3714 | mutex_lock(&mdsc->mutex); |
3715 | |
3716 | /* only abort if we didn't race with a real reply */ |
3717 | if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
3718 | err = le32_to_cpu(req->r_reply_info.head->result); |
3719 | } else if (err < 0) { |
3720 | doutc(cl, "aborted request %lld with %d\n" , req->r_tid, err); |
3721 | |
3722 | /* |
3723 | * ensure we aren't running concurrently with |
3724 | * ceph_fill_trace or ceph_readdir_prepopulate, which |
3725 | * rely on locks (dir mutex) held by our caller. |
3726 | */ |
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
		mutex_unlock(&req->r_fill_mutex);
3731 | |
3732 | if (req->r_parent && |
3733 | (req->r_op & CEPH_MDS_OP_WRITE)) |
3734 | ceph_invalidate_dir_request(req); |
3735 | } else { |
3736 | err = req->r_err; |
3737 | } |
3738 | |
	mutex_unlock(&mdsc->mutex);
3740 | return err; |
3741 | } |
3742 | |
3743 | /* |
3744 | * Synchrously perform an mds request. Take care of all of the |
3745 | * session setup, forwarding, retry details. |
3746 | */ |
3747 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, |
3748 | struct inode *dir, |
3749 | struct ceph_mds_request *req) |
3750 | { |
3751 | struct ceph_client *cl = mdsc->fsc->client; |
3752 | int err; |
3753 | |
3754 | doutc(cl, "do_request on %p\n" , req); |
3755 | |
3756 | /* issue */ |
3757 | err = ceph_mdsc_submit_request(mdsc, dir, req); |
3758 | if (!err) |
3759 | err = ceph_mdsc_wait_request(mdsc, req, NULL); |
3760 | doutc(cl, "do_request %p done, result %d\n" , req, err); |
3761 | return err; |
3762 | } |
3763 | |
3764 | /* |
3765 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
3766 | * namespace request. |
3767 | */ |
3768 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) |
3769 | { |
3770 | struct inode *dir = req->r_parent; |
3771 | struct inode *old_dir = req->r_old_dentry_dir; |
3772 | struct ceph_client *cl = req->r_mdsc->fsc->client; |
3773 | |
3774 | doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n" , |
3775 | dir, old_dir); |
3776 | |
3777 | ceph_dir_clear_complete(inode: dir); |
3778 | if (old_dir) |
3779 | ceph_dir_clear_complete(inode: old_dir); |
3780 | if (req->r_dentry) |
3781 | ceph_invalidate_dentry_lease(dentry: req->r_dentry); |
3782 | if (req->r_old_dentry) |
3783 | ceph_invalidate_dentry_lease(dentry: req->r_old_dentry); |
3784 | } |
3785 | |
3786 | /* |
3787 | * Handle mds reply. |
3788 | * |
3789 | * We take the session mutex and parse and process the reply immediately. |
3790 | * This preserves the logical ordering of replies, capabilities, etc., sent |
3791 | * by the MDS as they are applied to our local cache. |
3792 | */ |
3793 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) |
3794 | { |
3795 | struct ceph_mds_client *mdsc = session->s_mdsc; |
3796 | struct ceph_client *cl = mdsc->fsc->client; |
3797 | struct ceph_mds_request *req; |
3798 | struct ceph_mds_reply_head *head = msg->front.iov_base; |
3799 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ |
3800 | struct ceph_snap_realm *realm; |
3801 | u64 tid; |
3802 | int err, result; |
3803 | int mds = session->s_mds; |
3804 | bool close_sessions = false; |
3805 | |
3806 | if (msg->front.iov_len < sizeof(*head)) { |
3807 | pr_err_client(cl, "got corrupt (short) reply\n" ); |
3808 | ceph_msg_dump(msg); |
3809 | return; |
3810 | } |
3811 | |
3812 | /* get request, session */ |
3813 | tid = le64_to_cpu(msg->hdr.tid); |
3814 | mutex_lock(&mdsc->mutex); |
3815 | req = lookup_get_request(mdsc, tid); |
	if (!req) {
		doutc(cl, "on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	doutc(cl, "handle_reply %p\n", req);
3822 | |
3823 | /* correct session? */ |
3824 | if (req->r_session != session) { |
3825 | pr_err_client(cl, "got %llu on session mds%d not mds%d\n" , |
3826 | tid, session->s_mds, |
3827 | req->r_session ? req->r_session->s_mds : -1); |
3828 | mutex_unlock(lock: &mdsc->mutex); |
3829 | goto out; |
3830 | } |
3831 | |
3832 | /* dup? */ |
3833 | if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || |
3834 | (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { |
3835 | pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n" , |
3836 | head->safe ? "safe" : "unsafe" , tid, mds); |
3837 | mutex_unlock(lock: &mdsc->mutex); |
3838 | goto out; |
3839 | } |
3840 | if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { |
3841 | pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n" , |
3842 | tid, mds); |
3843 | mutex_unlock(lock: &mdsc->mutex); |
3844 | goto out; |
3845 | } |
3846 | |
3847 | result = le32_to_cpu(head->result); |
3848 | |
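	/*
	 * An unsafe reply means the MDS has applied the request but not
	 * yet journaled it; the matching safe reply arrives once the
	 * update is durably committed, so both may be seen for the same
	 * tid.
	 */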
3849 | if (head->safe) { |
		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3851 | __unregister_request(mdsc, req); |
3852 | |
3853 | /* last request during umount? */ |
3854 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
3855 | complete_all(&mdsc->safe_umount_waiters); |
3856 | |
3857 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
3858 | /* |
3859 | * We already handled the unsafe response, now do the |
3860 | * cleanup. No need to examine the response; the MDS |
3861 | * doesn't include any result info in the safe |
3862 | * response. And even if it did, there is nothing |
3863 | * useful we could do with a revised return value. |
3864 | */ |
3865 | doutc(cl, "got safe reply %llu, mds%d\n" , tid, mds); |
3866 | |
3867 | mutex_unlock(lock: &mdsc->mutex); |
3868 | goto out; |
3869 | } |
3870 | } else { |
		set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3873 | } |
3874 | |
3875 | doutc(cl, "tid %lld result %d\n" , tid, result); |
3876 | if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features)) |
3877 | err = parse_reply_info(s: session, msg, req, features: (u64)-1); |
3878 | else |
3879 | err = parse_reply_info(s: session, msg, req, |
3880 | features: session->s_con.peer_features); |
3881 | mutex_unlock(lock: &mdsc->mutex); |
3882 | |
3883 | /* Must find target inode outside of mutexes to avoid deadlocks */ |
3884 | rinfo = &req->r_reply_info; |
3885 | if ((err >= 0) && rinfo->head->is_target) { |
3886 | struct inode *in = xchg(&req->r_new_inode, NULL); |
3887 | struct ceph_vino tvino = { |
3888 | .ino = le64_to_cpu(rinfo->targeti.in->ino), |
3889 | .snap = le64_to_cpu(rinfo->targeti.in->snapid) |
3890 | }; |
3891 | |
3892 | /* |
3893 | * If we ended up opening an existing inode, discard |
3894 | * r_new_inode |
3895 | */ |
3896 | if (req->r_op == CEPH_MDS_OP_CREATE && |
3897 | !req->r_reply_info.has_create_ino) { |
3898 | /* This should never happen on an async create */ |
3899 | WARN_ON_ONCE(req->r_deleg_ino); |
3900 | iput(in); |
3901 | in = NULL; |
3902 | } |
3903 | |
		in = ceph_get_inode(mdsc->fsc->sb, tvino, in);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
3907 | mutex_lock(&session->s_mutex); |
3908 | goto out_err; |
3909 | } |
3910 | req->r_target_inode = in; |
3911 | } |
3912 | |
3913 | mutex_lock(&session->s_mutex); |
3914 | if (err < 0) { |
3915 | pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n" , |
3916 | mds, tid); |
3917 | ceph_msg_dump(msg); |
3918 | goto out_err; |
3919 | } |
3920 | |
3921 | /* snap trace */ |
3922 | realm = NULL; |
3923 | if (rinfo->snapblob_len) { |
		down_write(&mdsc->snap_rwsem);
		err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		if (err) {
			up_write(&mdsc->snap_rwsem);
			close_sessions = true;
			if (err == -EIO)
				ceph_msg_dump(msg);
			goto out_err;
		}
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
3939 | } |
3940 | |
3941 | /* insert trace into our cache */ |
3942 | mutex_lock(&req->r_fill_mutex); |
3943 | current->journal_info = req; |
	err = ceph_fill_trace(mdsc->fsc->sb, req);
3945 | if (err == 0) { |
3946 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
3947 | req->r_op == CEPH_MDS_OP_LSSNAP)) |
			err = ceph_readdir_prepopulate(req, req->r_session);
3949 | } |
3950 | current->journal_info = NULL; |
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
3954 | if (realm) |
3955 | ceph_put_snap_realm(mdsc, realm); |
3956 | |
3957 | if (err == 0) { |
3958 | if (req->r_target_inode && |
3959 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
			struct ceph_inode_info *ci =
				ceph_inode(req->r_target_inode);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_target_item,
				      &ci->i_unsafe_iops);
			spin_unlock(&ci->i_unsafe_lock);
		}

		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3969 | } |
3970 | out_err: |
3971 | mutex_lock(&mdsc->mutex); |
3972 | if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
3973 | if (err) { |
3974 | req->r_err = err; |
3975 | } else { |
3976 | req->r_reply = ceph_msg_get(msg); |
			set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3978 | } |
3979 | } else { |
3980 | doutc(cl, "reply arrived after request %lld was aborted\n" , tid); |
3981 | } |
3982 | mutex_unlock(lock: &mdsc->mutex); |
3983 | |
3984 | mutex_unlock(lock: &session->s_mutex); |
3985 | |
3986 | /* kick calling process */ |
3987 | complete_request(mdsc, req); |
3988 | |
	ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
				     req->r_end_latency, err);
3991 | out: |
3992 | ceph_mdsc_put_request(req); |
3993 | |
3994 | /* Defer closing the sessions after s_mutex lock being released */ |
3995 | if (close_sessions) |
3996 | ceph_mdsc_close_sessions(mdsc); |
3997 | return; |
3998 | } |
3999 | |
4000 | |
4001 | |
4002 | /* |
4003 | * handle mds notification that our request has been forwarded. |
4004 | */ |
4005 | static void handle_forward(struct ceph_mds_client *mdsc, |
4006 | struct ceph_mds_session *session, |
4007 | struct ceph_msg *msg) |
4008 | { |
4009 | struct ceph_client *cl = mdsc->fsc->client; |
4010 | struct ceph_mds_request *req; |
4011 | u64 tid = le64_to_cpu(msg->hdr.tid); |
4012 | u32 next_mds; |
4013 | u32 fwd_seq; |
4014 | int err = -EINVAL; |
4015 | void *p = msg->front.iov_base; |
4016 | void *end = p + msg->front.iov_len; |
4017 | bool aborted = false; |
4018 | |
4019 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);
4022 | |
4023 | mutex_lock(&mdsc->mutex); |
4024 | req = lookup_get_request(mdsc, tid); |
4025 | if (!req) { |
		mutex_unlock(&mdsc->mutex);
		doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds);
4028 | return; /* dup reply? */ |
4029 | } |
4030 | |
4031 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
4032 | doutc(cl, "forward tid %llu aborted, unregistering\n" , tid); |
4033 | __unregister_request(mdsc, req); |
4034 | } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) { |
4035 | /* |
4036 | * Avoid inifinite retrying after overflow. |
4037 | * |
4038 | * The MDS will increase the fwd count and in client side |
4039 | * if the num_fwd is less than the one saved in request |
4040 | * that means the MDS is an old version and overflowed of |
4041 | * 8 bits. |
4042 | */ |
4043 | mutex_lock(&req->r_fill_mutex); |
4044 | req->r_err = -EMULTIHOP; |
4045 | set_bit(CEPH_MDS_R_ABORTED, addr: &req->r_req_flags); |
4046 | mutex_unlock(lock: &req->r_fill_mutex); |
4047 | aborted = true; |
4048 | pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n" , |
4049 | tid); |
4050 | } else { |
4051 | /* resend. forward race not possible; mds would drop */ |
4052 | doutc(cl, "forward tid %llu to mds%d (we resend)\n" , tid, next_mds); |
4053 | BUG_ON(req->r_err); |
4054 | BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); |
4055 | req->r_attempts = 0; |
4056 | req->r_num_fwd = fwd_seq; |
4057 | req->r_resend_mds = next_mds; |
4058 | put_request_session(req); |
4059 | __do_request(mdsc, req); |
4060 | } |
	mutex_unlock(&mdsc->mutex);
4062 | |
4063 | /* kick calling process */ |
4064 | if (aborted) |
4065 | complete_request(mdsc, req); |
4066 | ceph_mdsc_put_request(req); |
4067 | return; |
4068 | |
4069 | bad: |
4070 | pr_err_client(cl, "decode error err=%d\n" , err); |
4071 | ceph_msg_dump(msg); |
4072 | } |
4073 | |
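/*
 * The session metadata is a map<string,string> on the wire: a u32
 * entry count, then for each entry a length-prefixed key and a
 * length-prefixed value (u32 len + bytes each).
 */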
4074 | static int __decode_session_metadata(void **p, void *end, |
4075 | bool *blocklisted) |
4076 | { |
4077 | /* map<string,string> */ |
4078 | u32 n; |
4079 | bool err_str; |
4080 | ceph_decode_32_safe(p, end, n, bad); |
4081 | while (n-- > 0) { |
4082 | u32 len; |
4083 | ceph_decode_32_safe(p, end, len, bad); |
4084 | ceph_decode_need(p, end, len, bad); |
		err_str = !strncmp(*p, "error_string", len);
4086 | *p += len; |
4087 | ceph_decode_32_safe(p, end, len, bad); |
4088 | ceph_decode_need(p, end, len, bad); |
4089 | /* |
4090 | * Match "blocklisted (blacklisted)" from newer MDSes, |
4091 | * or "blacklisted" from older MDSes. |
4092 | */ |
		if (err_str && strnstr(*p, "blacklisted", len))
4094 | *blocklisted = true; |
4095 | *p += len; |
4096 | } |
4097 | return 0; |
4098 | bad: |
4099 | return -1; |
4100 | } |
4101 | |
4102 | /* |
4103 | * handle a mds session control message |
4104 | */ |
4105 | static void handle_session(struct ceph_mds_session *session, |
4106 | struct ceph_msg *msg) |
4107 | { |
4108 | struct ceph_mds_client *mdsc = session->s_mdsc; |
4109 | struct ceph_client *cl = mdsc->fsc->client; |
4110 | int mds = session->s_mds; |
4111 | int msg_version = le16_to_cpu(msg->hdr.version); |
4112 | void *p = msg->front.iov_base; |
4113 | void *end = p + msg->front.iov_len; |
4114 | struct ceph_mds_session_head *h; |
4115 | u32 op; |
4116 | u64 seq, features = 0; |
4117 | int wake = 0; |
4118 | bool blocklisted = false; |
4119 | |
4120 | /* decode */ |
4121 | ceph_decode_need(&p, end, sizeof(*h), bad); |
4122 | h = p; |
4123 | p += sizeof(*h); |
4124 | |
4125 | op = le32_to_cpu(h->op); |
4126 | seq = le64_to_cpu(h->seq); |
4127 | |
4128 | if (msg_version >= 3) { |
4129 | u32 len; |
		/* versions >= 2 and < 5: decode the metadata map; skip it
		 * otherwise, as the information is handled via flags.
		 */
4133 | if (msg_version >= 5) |
4134 | ceph_decode_skip_map(&p, end, string, string, bad); |
		else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
4136 | goto bad; |
4137 | |
4138 | /* version >= 3, feature bits */ |
4139 | ceph_decode_32_safe(&p, end, len, bad); |
4140 | if (len) { |
4141 | ceph_decode_64_safe(&p, end, features, bad); |
4142 | p += len - sizeof(features); |
4143 | } |
4144 | } |
4145 | |
4146 | if (msg_version >= 5) { |
4147 | u32 flags, len; |
4148 | |
4149 | /* version >= 4 */ |
4150 | ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */ |
4151 | ceph_decode_32_safe(&p, end, len, bad); /* len */ |
4152 | ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */ |
4153 | |
4154 | /* version >= 5, flags */ |
4155 | ceph_decode_32_safe(&p, end, flags, bad); |
4156 | if (flags & CEPH_SESSION_BLOCKLISTED) { |
4157 | pr_warn_client(cl, "mds%d session blocklisted\n" , |
4158 | session->s_mds); |
4159 | blocklisted = true; |
4160 | } |
4161 | } |
4162 | |
4163 | mutex_lock(&mdsc->mutex); |
4164 | if (op == CEPH_SESSION_CLOSE) { |
		ceph_get_mds_session(session);
		__unregister_session(mdsc, session);
4167 | } |
4168 | /* FIXME: this ttl calculation is generous */ |
4169 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; |
	mutex_unlock(&mdsc->mutex);
4171 | |
4172 | mutex_lock(&session->s_mutex); |
4173 | |
4174 | doutc(cl, "mds%d %s %p state %s seq %llu\n" , mds, |
4175 | ceph_session_op_name(op), session, |
4176 | ceph_session_state_name(session->s_state), seq); |
4177 | |
4178 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { |
4179 | session->s_state = CEPH_MDS_SESSION_OPEN; |
4180 | pr_info_client(cl, "mds%d came back\n" , session->s_mds); |
4181 | } |
4182 | |
4183 | switch (op) { |
4184 | case CEPH_SESSION_OPEN: |
4185 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
4186 | pr_info_client(cl, "mds%d reconnect success\n" , |
4187 | session->s_mds); |
4188 | |
4189 | session->s_features = features; |
4190 | if (session->s_state == CEPH_MDS_SESSION_OPEN) { |
			pr_notice_client(cl, "mds%d is already opened\n",
					 session->s_mds);
		} else {
			session->s_state = CEPH_MDS_SESSION_OPEN;
			renewed_caps(mdsc, session, 0);
			if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
				     &session->s_features))
				metric_schedule_delayed(&mdsc->metric);
4199 | } |
4200 | |
4201 | /* |
4202 | * The connection maybe broken and the session in client |
4203 | * side has been reinitialized, need to update the seq |
4204 | * anyway. |
4205 | */ |
4206 | if (!session->s_seq && seq) |
4207 | session->s_seq = seq; |
4208 | |
4209 | wake = 1; |
4210 | if (mdsc->stopping) |
4211 | __close_session(mdsc, session); |
4212 | break; |
4213 | |
4214 | case CEPH_SESSION_RENEWCAPS: |
4215 | if (session->s_renew_seq == seq) |
			renewed_caps(mdsc, session, 1);
4217 | break; |
4218 | |
4219 | case CEPH_SESSION_CLOSE: |
4220 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
4221 | pr_info_client(cl, "mds%d reconnect denied\n" , |
4222 | session->s_mds); |
4223 | session->s_state = CEPH_MDS_SESSION_CLOSED; |
4224 | cleanup_session_requests(mdsc, session); |
4225 | remove_session_caps(session); |
4226 | wake = 2; /* for good measure */ |
4227 | wake_up_all(&mdsc->session_close_wq); |
4228 | break; |
4229 | |
4230 | case CEPH_SESSION_STALE: |
4231 | pr_info_client(cl, "mds%d caps went stale, renewing\n" , |
4232 | session->s_mds); |
4233 | atomic_inc(v: &session->s_cap_gen); |
4234 | session->s_cap_ttl = jiffies - 1; |
4235 | send_renew_caps(mdsc, session); |
4236 | break; |
4237 | |
4238 | case CEPH_SESSION_RECALL_STATE: |
4239 | ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); |
4240 | break; |
4241 | |
4242 | case CEPH_SESSION_FLUSHMSG: |
4243 | /* flush cap releases */ |
		spin_lock(&session->s_cap_lock);
		if (session->s_num_cap_releases)
			ceph_flush_cap_releases(mdsc, session);
		spin_unlock(&session->s_cap_lock);
4248 | |
4249 | send_flushmsg_ack(mdsc, session, seq); |
4250 | break; |
4251 | |
4252 | case CEPH_SESSION_FORCE_RO: |
4253 | doutc(cl, "force_session_readonly %p\n" , session); |
4254 | spin_lock(lock: &session->s_cap_lock); |
4255 | session->s_readonly = true; |
4256 | spin_unlock(lock: &session->s_cap_lock); |
4257 | wake_up_session_caps(session, ev: FORCE_RO); |
4258 | break; |
4259 | |
4260 | case CEPH_SESSION_REJECT: |
4261 | WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); |
4262 | pr_info_client(cl, "mds%d rejected session\n" , |
4263 | session->s_mds); |
4264 | session->s_state = CEPH_MDS_SESSION_REJECTED; |
4265 | cleanup_session_requests(mdsc, session); |
4266 | remove_session_caps(session); |
4267 | if (blocklisted) |
4268 | mdsc->fsc->blocklisted = true; |
4269 | wake = 2; /* for good measure */ |
4270 | break; |
4271 | |
4272 | default: |
4273 | pr_err_client(cl, "bad op %d mds%d\n" , op, mds); |
4274 | WARN_ON(1); |
4275 | } |
4276 | |
	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	if (op == CEPH_SESSION_CLOSE)
		ceph_put_mds_session(session);
4287 | return; |
4288 | |
4289 | bad: |
4290 | pr_err_client(cl, "corrupt message mds%d len %d\n" , mds, |
4291 | (int)msg->front.iov_len); |
4292 | ceph_msg_dump(msg); |
4293 | return; |
4294 | } |
4295 | |
4296 | void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req) |
4297 | { |
4298 | struct ceph_client *cl = req->r_mdsc->fsc->client; |
4299 | int dcaps; |
4300 | |
4301 | dcaps = xchg(&req->r_dir_caps, 0); |
4302 | if (dcaps) { |
4303 | doutc(cl, "releasing r_dir_caps=%s\n" , ceph_cap_string(dcaps)); |
4304 | ceph_put_cap_refs(ci: ceph_inode(inode: req->r_parent), had: dcaps); |
4305 | } |
4306 | } |
4307 | |
4308 | void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req) |
4309 | { |
4310 | struct ceph_client *cl = req->r_mdsc->fsc->client; |
4311 | int dcaps; |
4312 | |
4313 | dcaps = xchg(&req->r_dir_caps, 0); |
4314 | if (dcaps) { |
4315 | doutc(cl, "releasing r_dir_caps=%s\n" , ceph_cap_string(dcaps)); |
4316 | ceph_put_cap_refs_async(ci: ceph_inode(inode: req->r_parent), had: dcaps); |
4317 | } |
4318 | } |
4319 | |
4320 | /* |
4321 | * called under session->mutex. |
4322 | */ |
4323 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, |
4324 | struct ceph_mds_session *session) |
4325 | { |
4326 | struct ceph_mds_request *req, *nreq; |
4327 | struct rb_node *p; |
4328 | |
	doutc(mdsc->fsc->client, "mds%d\n", session->s_mds);
4330 | |
4331 | mutex_lock(&mdsc->mutex); |
4332 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) |
		__send_request(session, req, true);
4334 | |
4335 | /* |
4336 | * also re-send old requests when MDS enters reconnect stage. So that MDS |
4337 | * can process completed request in clientreplay stage. |
4338 | */ |
4339 | p = rb_first(&mdsc->request_tree); |
4340 | while (p) { |
4341 | req = rb_entry(p, struct ceph_mds_request, r_node); |
4342 | p = rb_next(p); |
4343 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
4344 | continue; |
4345 | if (req->r_attempts == 0) |
4346 | continue; /* only old requests */ |
4347 | if (!req->r_session) |
4348 | continue; |
4349 | if (req->r_session->s_mds != session->s_mds) |
4350 | continue; |
4351 | |
4352 | ceph_mdsc_release_dir_caps_async(req); |
4353 | |
		__send_request(session, req, true);
	}
	mutex_unlock(&mdsc->mutex);
4357 | } |
4358 | |
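/*
 * The reconnect payload is accumulated in a pagelist, with a 32-bit
 * placeholder for nr_caps (and, for v4+ encodings, nr_realms) that is
 * backfilled in the front page once the final counts are known.  When
 * the payload would exceed RECONNECT_MAX_SIZE, the current pagelist is
 * sent as a partial reconnect message and a fresh one is started.
 */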
4359 | static int send_reconnect_partial(struct ceph_reconnect_state *recon_state) |
4360 | { |
4361 | struct ceph_msg *reply; |
4362 | struct ceph_pagelist *_pagelist; |
4363 | struct page *page; |
4364 | __le32 *addr; |
4365 | int err = -ENOMEM; |
4366 | |
4367 | if (!recon_state->allow_multi) |
4368 | return -ENOSPC; |
4369 | |
4370 | /* can't handle message that contains both caps and realm */ |
4371 | BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms); |
4372 | |
4373 | /* pre-allocate new pagelist */ |
4374 | _pagelist = ceph_pagelist_alloc(GFP_NOFS); |
4375 | if (!_pagelist) |
4376 | return -ENOMEM; |
4377 | |
	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4379 | if (!reply) |
4380 | goto fail_msg; |
4381 | |
4382 | /* placeholder for nr_caps */ |
	err = ceph_pagelist_encode_32(_pagelist, 0);
4384 | if (err < 0) |
4385 | goto fail; |
4386 | |
	if (recon_state->nr_caps) {
		/* currently encoding caps */
		err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
		if (err)
			goto fail;
	} else {
		/* placeholder for nr_realms (currently encoding realms) */
		err = ceph_pagelist_encode_32(_pagelist, 0);
		if (err < 0)
			goto fail;
	}

	err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
4400 | if (err) |
4401 | goto fail; |
4402 | |
4403 | page = list_first_entry(&recon_state->pagelist->head, struct page, lru); |
4404 | addr = kmap_atomic(page); |
	if (recon_state->nr_caps) {
		/* currently encoding caps */
		*addr = cpu_to_le32(recon_state->nr_caps);
	} else {
		/* currently encoding realms */
		*(addr + 1) = cpu_to_le32(recon_state->nr_realms);
	}
4412 | kunmap_atomic(addr); |
4413 | |
4414 | reply->hdr.version = cpu_to_le16(5); |
4415 | reply->hdr.compat_version = cpu_to_le16(4); |
4416 | |
4417 | reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length); |
	ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
4419 | |
	ceph_con_send(&recon_state->session->s_con, reply);
	ceph_pagelist_release(recon_state->pagelist);
4422 | |
4423 | recon_state->pagelist = _pagelist; |
4424 | recon_state->nr_caps = 0; |
4425 | recon_state->nr_realms = 0; |
4426 | recon_state->msg_version = 5; |
4427 | return 0; |
4428 | fail: |
	ceph_msg_put(reply);
fail_msg:
	ceph_pagelist_release(_pagelist);
4432 | return err; |
4433 | } |
4434 | |
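/*
 * Find the "primary" alias of an inode: for a directory, its single
 * non-root alias; for other inodes, a hashed alias carrying the
 * CEPH_DENTRY_PRIMARY_LINK flag.  Returns a referenced dentry or NULL.
 */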
4435 | static struct dentry* d_find_primary(struct inode *inode) |
4436 | { |
4437 | struct dentry *alias, *dn = NULL; |
4438 | |
	if (hlist_empty(&inode->i_dentry))
4440 | return NULL; |
4441 | |
	spin_lock(&inode->i_lock);
	if (hlist_empty(&inode->i_dentry))
4444 | goto out_unlock; |
4445 | |
4446 | if (S_ISDIR(inode->i_mode)) { |
4447 | alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); |
4448 | if (!IS_ROOT(alias)) |
			dn = dget(alias);
4450 | goto out_unlock; |
4451 | } |
4452 | |
4453 | hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { |
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias) &&
		    (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
			dn = dget_dlock(alias);
		}
		spin_unlock(&alias->d_lock);
4460 | if (dn) |
4461 | break; |
4462 | } |
4463 | out_unlock: |
	spin_unlock(&inode->i_lock);
4465 | return dn; |
4466 | } |
4467 | |
4468 | /* |
4469 | * Encode information about a cap for a reconnect with the MDS. |
4470 | */ |
4471 | static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) |
4472 | { |
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
4474 | struct ceph_client *cl = ceph_inode_to_client(inode); |
4475 | union { |
4476 | struct ceph_mds_cap_reconnect v2; |
4477 | struct ceph_mds_cap_reconnect_v1 v1; |
4478 | } rec; |
4479 | struct ceph_inode_info *ci = ceph_inode(inode); |
4480 | struct ceph_reconnect_state *recon_state = arg; |
4481 | struct ceph_pagelist *pagelist = recon_state->pagelist; |
4482 | struct dentry *dentry; |
4483 | struct ceph_cap *cap; |
4484 | char *path; |
4485 | int pathlen = 0, err; |
4486 | u64 pathbase; |
4487 | u64 snap_follows; |
4488 | |
4489 | dentry = d_find_primary(inode); |
4490 | if (dentry) { |
4491 | /* set pathbase to parent dir when msg_version >= 2 */ |
		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
					    recon_state->msg_version >= 2);
		dput(dentry);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
4497 | goto out_err; |
4498 | } |
4499 | } else { |
4500 | path = NULL; |
4501 | pathbase = 0; |
4502 | } |
4503 | |
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
4508 | err = 0; |
4509 | goto out_err; |
4510 | } |
4511 | doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n" , inode, |
4512 | ceph_vinop(inode), cap, cap->cap_id, |
4513 | ceph_cap_string(cap->issued)); |
4514 | |
4515 | cap->seq = 0; /* reset cap seq */ |
4516 | cap->issue_seq = 0; /* and issue_seq */ |
4517 | cap->mseq = 0; /* and migrate_seq */ |
	cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
4519 | |
4520 | /* These are lost when the session goes away */ |
4521 | if (S_ISDIR(inode->i_mode)) { |
4522 | if (cap->issued & CEPH_CAP_DIR_CREATE) { |
4523 | ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); |
4524 | memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); |
4525 | } |
4526 | cap->issued &= ~CEPH_CAP_ANY_DIR_OPS; |
4527 | } |
4528 | |
4529 | if (recon_state->msg_version >= 2) { |
4530 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); |
4531 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); |
4532 | rec.v2.issued = cpu_to_le32(cap->issued); |
4533 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); |
4534 | rec.v2.pathbase = cpu_to_le64(pathbase); |
4535 | rec.v2.flock_len = (__force __le32) |
4536 | ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1); |
4537 | } else { |
4538 | struct timespec64 ts; |
4539 | |
4540 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); |
4541 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); |
4542 | rec.v1.issued = cpu_to_le32(cap->issued); |
4543 | rec.v1.size = cpu_to_le64(i_size_read(inode)); |
4544 | ts = inode_get_mtime(inode); |
		ceph_encode_timespec64(&rec.v1.mtime, &ts);
		ts = inode_get_atime(inode);
		ceph_encode_timespec64(&rec.v1.atime, &ts);
4548 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); |
4549 | rec.v1.pathbase = cpu_to_le64(pathbase); |
4550 | } |
4551 | |
	if (list_empty(&ci->i_cap_snaps)) {
4553 | snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0; |
4554 | } else { |
4555 | struct ceph_cap_snap *capsnap = |
4556 | list_first_entry(&ci->i_cap_snaps, |
4557 | struct ceph_cap_snap, ci_item); |
4558 | snap_follows = capsnap->follows; |
4559 | } |
	spin_unlock(&ci->i_ceph_lock);
4561 | |
4562 | if (recon_state->msg_version >= 2) { |
4563 | int num_fcntl_locks, num_flock_locks; |
4564 | struct ceph_filelock *flocks = NULL; |
4565 | size_t struct_len, total_len = sizeof(u64); |
4566 | u8 struct_v = 0; |
4567 | |
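		/*
		 * Lock state can change between counting the locks and
		 * encoding them; if ceph_encode_locks_to_buffer() then
		 * runs out of space it returns -ENOSPC and we recount
		 * and retry.
		 */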
4568 | encode_again: |
		if (rec.v2.flock_len) {
			ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		} else {
			num_fcntl_locks = 0;
			num_flock_locks = 0;
		}
		if (num_fcntl_locks + num_flock_locks > 0) {
			flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
					       sizeof(struct ceph_filelock),
					       GFP_NOFS);
			if (!flocks) {
				err = -ENOMEM;
				goto out_err;
			}
			err = ceph_encode_locks_to_buffer(inode, flocks,
							  num_fcntl_locks,
							  num_flock_locks);
			if (err) {
				kfree(flocks);
				flocks = NULL;
				if (err == -ENOSPC)
					goto encode_again;
				goto out_err;
			}
		} else {
			kfree(flocks);
			flocks = NULL;
		}
4597 | |
4598 | if (recon_state->msg_version >= 3) { |
4599 | /* version, compat_version and struct_len */ |
4600 | total_len += 2 * sizeof(u8) + sizeof(u32); |
4601 | struct_v = 2; |
4602 | } |
4603 | /* |
4604 | * number of encoded locks is stable, so copy to pagelist |
4605 | */ |
4606 | struct_len = 2 * sizeof(u32) + |
4607 | (num_fcntl_locks + num_flock_locks) * |
4608 | sizeof(struct ceph_filelock); |
4609 | rec.v2.flock_len = cpu_to_le32(struct_len); |
4610 | |
4611 | struct_len += sizeof(u32) + pathlen + sizeof(rec.v2); |
4612 | |
4613 | if (struct_v >= 2) |
4614 | struct_len += sizeof(u64); /* snap_follows */ |
4615 | |
4616 | total_len += struct_len; |
4617 | |
4618 | if (pagelist->length + total_len > RECONNECT_MAX_SIZE) { |
4619 | err = send_reconnect_partial(recon_state); |
4620 | if (err) |
4621 | goto out_freeflocks; |
4622 | pagelist = recon_state->pagelist; |
4623 | } |
4624 | |
		err = ceph_pagelist_reserve(pagelist, total_len);
		if (err)
			goto out_freeflocks;

		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
		if (recon_state->msg_version >= 3) {
			ceph_pagelist_encode_8(pagelist, struct_v);
			ceph_pagelist_encode_8(pagelist, 1);
			ceph_pagelist_encode_32(pagelist, struct_len);
		}
		ceph_pagelist_encode_string(pagelist, path, pathlen);
		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
		ceph_locks_to_pagelist(flocks, pagelist,
				       num_fcntl_locks, num_flock_locks);
		if (struct_v >= 2)
			ceph_pagelist_encode_64(pagelist, snap_follows);
out_freeflocks:
		kfree(flocks);
	} else {
		err = ceph_pagelist_reserve(pagelist,
					    sizeof(u64) + sizeof(u32) +
					    pathlen + sizeof(rec.v1));
		if (err)
			goto out_err;

		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
		ceph_pagelist_encode_string(pagelist, path, pathlen);
		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
	}

out_err:
	ceph_mdsc_free_path(path, pathlen);
4657 | if (!err) |
4658 | recon_state->nr_caps++; |
4659 | return err; |
4660 | } |
4661 | |
4662 | static int encode_snap_realms(struct ceph_mds_client *mdsc, |
4663 | struct ceph_reconnect_state *recon_state) |
4664 | { |
4665 | struct rb_node *p; |
4666 | struct ceph_pagelist *pagelist = recon_state->pagelist; |
4667 | struct ceph_client *cl = mdsc->fsc->client; |
4668 | int err = 0; |
4669 | |
4670 | if (recon_state->msg_version >= 4) { |
		err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
4672 | if (err < 0) |
4673 | goto fail; |
4674 | } |
4675 | |
4676 | /* |
4677 | * snaprealms. we provide mds with the ino, seq (version), and |
4678 | * parent for all of our realms. If the mds has any newer info, |
4679 | * it will tell us. |
4680 | */ |
4681 | for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { |
4682 | struct ceph_snap_realm *realm = |
4683 | rb_entry(p, struct ceph_snap_realm, node); |
4684 | struct ceph_mds_snaprealm_reconnect sr_rec; |
4685 | |
4686 | if (recon_state->msg_version >= 4) { |
4687 | size_t need = sizeof(u8) * 2 + sizeof(u32) + |
4688 | sizeof(sr_rec); |
4689 | |
4690 | if (pagelist->length + need > RECONNECT_MAX_SIZE) { |
4691 | err = send_reconnect_partial(recon_state); |
4692 | if (err) |
4693 | goto fail; |
4694 | pagelist = recon_state->pagelist; |
4695 | } |
4696 | |
			err = ceph_pagelist_reserve(pagelist, need);
			if (err)
				goto fail;

			ceph_pagelist_encode_8(pagelist, 1);
			ceph_pagelist_encode_8(pagelist, 1);
			ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
4704 | } |
4705 | |
4706 | doutc(cl, " adding snap realm %llx seq %lld parent %llx\n" , |
4707 | realm->ino, realm->seq, realm->parent_ino); |
4708 | sr_rec.ino = cpu_to_le64(realm->ino); |
4709 | sr_rec.seq = cpu_to_le64(realm->seq); |
4710 | sr_rec.parent = cpu_to_le64(realm->parent_ino); |
4711 | |
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
4713 | if (err) |
4714 | goto fail; |
4715 | |
4716 | recon_state->nr_realms++; |
4717 | } |
4718 | fail: |
4719 | return err; |
4720 | } |
4721 | |
4722 | |
4723 | /* |
4724 | * If an MDS fails and recovers, clients need to reconnect in order to |
4725 | * reestablish shared state. This includes all caps issued through |
4726 | * this session _and_ the snap_realm hierarchy. Because it's not |
4727 | * clear which snap realms the mds cares about, we send everything we |
 * know about; that ensures we'll then get any new info the
4729 | * recovering MDS might have. |
4730 | * |
4731 | * This is a relatively heavyweight operation, but it's rare. |
4732 | */ |
4733 | static void send_mds_reconnect(struct ceph_mds_client *mdsc, |
4734 | struct ceph_mds_session *session) |
4735 | { |
4736 | struct ceph_client *cl = mdsc->fsc->client; |
4737 | struct ceph_msg *reply; |
4738 | int mds = session->s_mds; |
4739 | int err = -ENOMEM; |
4740 | struct ceph_reconnect_state recon_state = { |
4741 | .session = session, |
4742 | }; |
4743 | LIST_HEAD(dispose); |
4744 | |
4745 | pr_info_client(cl, "mds%d reconnect start\n" , mds); |
4746 | |
4747 | recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS); |
4748 | if (!recon_state.pagelist) |
4749 | goto fail_nopagelist; |
4750 | |
	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4752 | if (!reply) |
4753 | goto fail_nomsg; |
4754 | |
4755 | xa_destroy(&session->s_delegated_inos); |
4756 | |
4757 | mutex_lock(&session->s_mutex); |
4758 | session->s_state = CEPH_MDS_SESSION_RECONNECTING; |
4759 | session->s_seq = 0; |
4760 | |
4761 | doutc(cl, "session %p state %s\n" , session, |
4762 | ceph_session_state_name(session->s_state)); |
4763 | |
4764 | atomic_inc(v: &session->s_cap_gen); |
4765 | |
4766 | spin_lock(lock: &session->s_cap_lock); |
4767 | /* don't know if session is readonly */ |
4768 | session->s_readonly = 0; |
4769 | /* |
4770 | * notify __ceph_remove_cap() that we are composing cap reconnect. |
4771 | * If a cap get released before being added to the cap reconnect, |
4772 | * __ceph_remove_cap() should skip queuing cap release. |
4773 | */ |
4774 | session->s_cap_reconnect = 1; |
4775 | /* drop old cap expires; we're about to reestablish that state */ |
4776 | detach_cap_releases(session, target: &dispose); |
4777 | spin_unlock(lock: &session->s_cap_lock); |
4778 | dispose_cap_releases(mdsc, dispose: &dispose); |
4779 | |
4780 | /* trim unused caps to reduce MDS's cache rejoin time */ |
4781 | if (mdsc->fsc->sb->s_root) |
4782 | shrink_dcache_parent(mdsc->fsc->sb->s_root); |
4783 | |
	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4788 | |
4789 | /* replay unsafe requests */ |
4790 | replay_unsafe_requests(mdsc, session); |
4791 | |
4792 | ceph_early_kick_flushing_caps(mdsc, session); |
4793 | |
	down_read(&mdsc->snap_rwsem);

	/* placeholder for nr_caps */
	err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4798 | if (err) |
4799 | goto fail; |
4800 | |
4801 | if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) { |
4802 | recon_state.msg_version = 3; |
4803 | recon_state.allow_multi = true; |
4804 | } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) { |
4805 | recon_state.msg_version = 3; |
4806 | } else { |
4807 | recon_state.msg_version = 2; |
4808 | } |
	/* traverse this session's caps */
	err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);
4815 | |
4816 | if (err < 0) |
4817 | goto fail; |
4818 | |
4819 | /* check if all realms can be encoded into current message */ |
4820 | if (mdsc->num_snap_realms) { |
4821 | size_t total_len = |
4822 | recon_state.pagelist->length + |
4823 | mdsc->num_snap_realms * |
4824 | sizeof(struct ceph_mds_snaprealm_reconnect); |
4825 | if (recon_state.msg_version >= 4) { |
4826 | /* number of realms */ |
4827 | total_len += sizeof(u32); |
4828 | /* version, compat_version and struct_len */ |
4829 | total_len += mdsc->num_snap_realms * |
4830 | (2 * sizeof(u8) + sizeof(u32)); |
4831 | } |
4832 | if (total_len > RECONNECT_MAX_SIZE) { |
4833 | if (!recon_state.allow_multi) { |
4834 | err = -ENOSPC; |
4835 | goto fail; |
4836 | } |
4837 | if (recon_state.nr_caps) { |
			err = send_reconnect_partial(&recon_state);
4839 | if (err) |
4840 | goto fail; |
4841 | } |
4842 | recon_state.msg_version = 5; |
4843 | } |
4844 | } |
4845 | |
	err = encode_snap_realms(mdsc, &recon_state);
4847 | if (err < 0) |
4848 | goto fail; |
4849 | |
4850 | if (recon_state.msg_version >= 5) { |
		err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4852 | if (err < 0) |
4853 | goto fail; |
4854 | } |
4855 | |
4856 | if (recon_state.nr_caps || recon_state.nr_realms) { |
4857 | struct page *page = |
4858 | list_first_entry(&recon_state.pagelist->head, |
4859 | struct page, lru); |
4860 | __le32 *addr = kmap_atomic(page); |
4861 | if (recon_state.nr_caps) { |
4862 | WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms); |
4863 | *addr = cpu_to_le32(recon_state.nr_caps); |
4864 | } else if (recon_state.msg_version >= 4) { |
4865 | *(addr + 1) = cpu_to_le32(recon_state.nr_realms); |
4866 | } |
4867 | kunmap_atomic(addr); |
4868 | } |
4869 | |
4870 | reply->hdr.version = cpu_to_le16(recon_state.msg_version); |
4871 | if (recon_state.msg_version >= 4) |
4872 | reply->hdr.compat_version = cpu_to_le16(4); |
4873 | |
4874 | reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length); |
	ceph_msg_data_add_pagelist(reply, recon_state.pagelist);

	ceph_con_send(&session->s_con, reply);
4878 | |
	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	ceph_pagelist_release(recon_state.pagelist);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(recon_state.pagelist);
4895 | fail_nopagelist: |
4896 | pr_err_client(cl, "error %d preparing reconnect for mds%d\n" , |
4897 | err, mds); |
4898 | return; |
4899 | } |
4900 | |
4901 | |
4902 | /* |
4903 | * compare old and new mdsmaps, kicking requests |
4904 | * and closing out old connections as necessary |
4905 | * |
4906 | * called under mdsc->mutex. |
4907 | */ |
4908 | static void check_new_map(struct ceph_mds_client *mdsc, |
4909 | struct ceph_mdsmap *newmap, |
4910 | struct ceph_mdsmap *oldmap) |
4911 | { |
4912 | int i, j, err; |
4913 | int oldstate, newstate; |
4914 | struct ceph_mds_session *s; |
4915 | unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0}; |
4916 | struct ceph_client *cl = mdsc->fsc->client; |
4917 | |
4918 | doutc(cl, "new %u old %u\n" , newmap->m_epoch, oldmap->m_epoch); |
4919 | |
4920 | if (newmap->m_info) { |
4921 | for (i = 0; i < newmap->possible_max_rank; i++) { |
4922 | for (j = 0; j < newmap->m_info[i].num_export_targets; j++) |
				set_bit(newmap->m_info[i].export_targets[j], targets);
4924 | } |
4925 | } |
4926 | |
4927 | for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) { |
4928 | if (!mdsc->sessions[i]) |
4929 | continue; |
4930 | s = mdsc->sessions[i]; |
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n",
		      i, ceph_mds_state_name(oldstate),
		      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		      ceph_mds_state_name(newstate),
		      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4939 | ceph_session_state_name(s->s_state)); |
4940 | |
4941 | if (i >= newmap->possible_max_rank) { |
4942 | /* force close session for stopped mds */ |
4943 | ceph_get_mds_session(s); |
4944 | __unregister_session(mdsc, s); |
__wake_requests(mdsc, &s->s_waiting);
mutex_unlock(&mdsc->mutex);

mutex_lock(&s->s_mutex);
cleanup_session_requests(mdsc, s);
remove_session_caps(s);
mutex_unlock(&s->s_mutex);

ceph_put_mds_session(s);

mutex_lock(&mdsc->mutex);
kick_requests(mdsc, i);
4957 | continue; |
4958 | } |
4959 | |
if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
ceph_mdsmap_get_addr(newmap, i),
sizeof(struct ceph_entity_addr))) {
/* just close it */
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
mutex_lock(&mdsc->mutex);
ceph_con_close(&s->s_con);
mutex_unlock(&s->s_mutex);
s->s_state = CEPH_MDS_SESSION_RESTARTING;
4970 | } else if (oldstate == newstate) { |
4971 | continue; /* nothing new with this mds */ |
4972 | } |
4973 | |
4974 | /* |
4975 | * send reconnect? |
4976 | */ |
4977 | if (s->s_state == CEPH_MDS_SESSION_RESTARTING && |
4978 | newstate >= CEPH_MDS_STATE_RECONNECT) { |
mutex_unlock(&mdsc->mutex);
clear_bit(i, targets);
send_mds_reconnect(mdsc, s);
4982 | mutex_lock(&mdsc->mutex); |
4983 | } |
4984 | |
4985 | /* |
4986 | * kick request on any mds that has gone active. |
4987 | */ |
4988 | if (oldstate < CEPH_MDS_STATE_ACTIVE && |
4989 | newstate >= CEPH_MDS_STATE_ACTIVE) { |
4990 | if (oldstate != CEPH_MDS_STATE_CREATING && |
4991 | oldstate != CEPH_MDS_STATE_STARTING) |
pr_info_client(cl, "mds%d recovery completed\n",
4993 | s->s_mds); |
kick_requests(mdsc, i);
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
mutex_lock(&mdsc->mutex);
ceph_kick_flushing_caps(mdsc, s);
mutex_unlock(&s->s_mutex);
wake_up_session_caps(s, RECONNECT);
5001 | } |
5002 | } |
5003 | |
5004 | /* |
5005 | * Only open and reconnect sessions that don't exist yet. |
5006 | */ |
5007 | for (i = 0; i < newmap->possible_max_rank; i++) { |
5008 | /* |
5009 | * In case the import MDS is crashed just after |
5010 | * the EImportStart journal is flushed, so when |
5011 | * a standby MDS takes over it and is replaying |
5012 | * the EImportStart journal the new MDS daemon |
5013 | * will wait the client to reconnect it, but the |
5014 | * client may never register/open the session yet. |
5015 | * |
5016 | * Will try to reconnect that MDS daemon if the |
5017 | * rank number is in the export targets array and |
5018 | * is the up:reconnect state. |
5019 | */ |
newstate = ceph_mdsmap_get_state(newmap, i);
5021 | if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT) |
5022 | continue; |
5023 | |
5024 | /* |
5025 | * The session maybe registered and opened by some |
5026 | * requests which were choosing random MDSes during |
5027 | * the mdsc->mutex's unlock/lock gap below in rare |
5028 | * case. But the related MDS daemon will just queue |
5029 | * that requests and be still waiting for the client's |
5030 | * reconnection request in up:reconnect state. |
5031 | */ |
s = __ceph_lookup_mds_session(mdsc, i);
if (likely(!s)) {
s = __open_export_target_session(mdsc, i);
if (IS_ERR(s)) {
err = PTR_ERR(s);
pr_err_client(cl,
"failed to open export target session, err %d\n",
err);
continue;
}
}
doutc(cl, "send reconnect to export target mds.%d\n", i);
mutex_unlock(&mdsc->mutex);
send_mds_reconnect(mdsc, s);
5046 | ceph_put_mds_session(s); |
5047 | mutex_lock(&mdsc->mutex); |
5048 | } |
5049 | |
5050 | for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) { |
5051 | s = mdsc->sessions[i]; |
5052 | if (!s) |
5053 | continue; |
if (!ceph_mdsmap_is_laggy(newmap, i))
5055 | continue; |
5056 | if (s->s_state == CEPH_MDS_SESSION_OPEN || |
5057 | s->s_state == CEPH_MDS_SESSION_HUNG || |
5058 | s->s_state == CEPH_MDS_SESSION_CLOSING) { |
doutc(cl, " connecting to export targets of laggy mds%d\n", i);
__open_export_target_sessions(mdsc, s);
5061 | } |
5062 | } |
5063 | } |
5064 | |
5065 | |
5066 | |
5067 | /* |
5068 | * leases |
5069 | */ |
5070 | |
5071 | /* |
5072 | * caller must hold session s_mutex, dentry->d_lock |
5073 | */ |
5074 | void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) |
5075 | { |
5076 | struct ceph_dentry_info *di = ceph_dentry(dentry); |
5077 | |
ceph_put_mds_session(di->lease_session);
5079 | di->lease_session = NULL; |
5080 | } |
5081 | |
5082 | static void handle_lease(struct ceph_mds_client *mdsc, |
5083 | struct ceph_mds_session *session, |
5084 | struct ceph_msg *msg) |
5085 | { |
5086 | struct ceph_client *cl = mdsc->fsc->client; |
5087 | struct super_block *sb = mdsc->fsc->sb; |
5088 | struct inode *inode; |
5089 | struct dentry *parent, *dentry; |
5090 | struct ceph_dentry_info *di; |
5091 | int mds = session->s_mds; |
5092 | struct ceph_mds_lease *h = msg->front.iov_base; |
5093 | u32 seq; |
5094 | struct ceph_vino vino; |
5095 | struct qstr dname; |
5096 | int release = 0; |
5097 | |
doutc(cl, "from mds%d\n", mds);
5099 | |
5100 | if (!ceph_inc_mds_stopping_blocker(mdsc, session)) |
5101 | return; |
5102 | |
/* decode: struct ceph_mds_lease, then u32 name length + name bytes */
5104 | if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) |
5105 | goto bad; |
5106 | vino.ino = le64_to_cpu(h->ino); |
5107 | vino.snap = CEPH_NOSNAP; |
5108 | seq = le32_to_cpu(h->seq); |
dname.len = get_unaligned_le32(h + 1);
5110 | if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len) |
5111 | goto bad; |
5112 | dname.name = (void *)(h + 1) + sizeof(u32); |
5113 | |
5114 | /* lookup inode */ |
5115 | inode = ceph_find_inode(sb, vino); |
doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action),
5117 | vino.ino, inode, dname.len, dname.name); |
5118 | |
5119 | mutex_lock(&session->s_mutex); |
5120 | if (!inode) { |
doutc(cl, "no inode %llx\n", vino.ino);
5122 | goto release; |
5123 | } |
5124 | |
5125 | /* dentry */ |
5126 | parent = d_find_alias(inode); |
5127 | if (!parent) { |
doutc(cl, "no parent dentry on inode %p\n", inode);
5129 | WARN_ON(1); |
5130 | goto release; /* hrm... */ |
5131 | } |
dname.hash = full_name_hash(parent, dname.name, dname.len);
5133 | dentry = d_lookup(parent, &dname); |
5134 | dput(parent); |
5135 | if (!dentry) |
5136 | goto release; |
5137 | |
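/*
* REVOKE: drop our lease state and ack the revocation.  RENEW:
* refresh the dentry lease timestamps, provided the grant still
* matches this session and cap generation.
*/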
spin_lock(&dentry->d_lock);
5139 | di = ceph_dentry(dentry); |
5140 | switch (h->action) { |
5141 | case CEPH_MDS_LEASE_REVOKE: |
5142 | if (di->lease_session == session) { |
if (ceph_seq_cmp(di->lease_seq, seq) > 0)
5144 | h->seq = cpu_to_le32(di->lease_seq); |
5145 | __ceph_mdsc_drop_dentry_lease(dentry); |
5146 | } |
5147 | release = 1; |
5148 | break; |
5149 | |
5150 | case CEPH_MDS_LEASE_RENEW: |
5151 | if (di->lease_session == session && |
di->lease_gen == atomic_read(&session->s_cap_gen) &&
5153 | di->lease_renew_from && |
5154 | di->lease_renew_after == 0) { |
5155 | unsigned long duration = |
5156 | msecs_to_jiffies(le32_to_cpu(h->duration_ms)); |
5157 | |
5158 | di->lease_seq = seq; |
5159 | di->time = di->lease_renew_from + duration; |
5160 | di->lease_renew_after = di->lease_renew_from + |
5161 | (duration >> 1); |
5162 | di->lease_renew_from = 0; |
5163 | } |
5164 | break; |
5165 | } |
spin_unlock(&dentry->d_lock);
5167 | dput(dentry); |
5168 | |
5169 | if (!release) |
5170 | goto out; |
5171 | |
5172 | release: |
5173 | /* let's just reuse the same message */ |
5174 | h->action = CEPH_MDS_LEASE_REVOKE_ACK; |
5175 | ceph_msg_get(msg); |
ceph_con_send(&session->s_con, msg);
5177 | |
5178 | out: |
mutex_unlock(&session->s_mutex);
5180 | iput(inode); |
5181 | |
5182 | ceph_dec_mds_stopping_blocker(mdsc); |
5183 | return; |
5184 | |
5185 | bad: |
5186 | ceph_dec_mds_stopping_blocker(mdsc); |
5187 | |
pr_err_client(cl, "corrupt lease message\n");
5189 | ceph_msg_dump(msg); |
5190 | } |
5191 | |
5192 | void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, |
5193 | struct dentry *dentry, char action, |
5194 | u32 seq) |
5195 | { |
5196 | struct ceph_client *cl = session->s_mdsc->fsc->client; |
5197 | struct ceph_msg *msg; |
5198 | struct ceph_mds_lease *lease; |
5199 | struct inode *dir; |
5200 | int len = sizeof(*lease) + sizeof(u32) + NAME_MAX; |
5201 | |
doutc(cl, "dentry %p %s to mds%d\n", dentry, ceph_lease_op_name(action),
5203 | session->s_mds); |
5204 | |
msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
5206 | if (!msg) |
5207 | return; |
5208 | lease = msg->front.iov_base; |
5209 | lease->action = action; |
5210 | lease->seq = cpu_to_le32(seq); |
5211 | |
spin_lock(&dentry->d_lock);
dir = d_inode(dentry->d_parent);
lease->ino = cpu_to_le64(ceph_ino(dir));
lease->first = lease->last = cpu_to_le64(ceph_snap(dir));

put_unaligned_le32(dentry->d_name.len, lease + 1);
memcpy((void *)(lease + 1) + 4,
dentry->d_name.name, dentry->d_name.len);
spin_unlock(&dentry->d_lock);
5221 | |
ceph_con_send(&session->s_con, msg);
5223 | } |
5224 | |
5225 | /* |
5226 | * lock unlock the session, to wait ongoing session activities |
5227 | */ |
5228 | static void lock_unlock_session(struct ceph_mds_session *s) |
5229 | { |
5230 | mutex_lock(&s->s_mutex); |
mutex_unlock(&s->s_mutex);
5232 | } |
5233 | |
5234 | static void maybe_recover_session(struct ceph_mds_client *mdsc) |
5235 | { |
5236 | struct ceph_client *cl = mdsc->fsc->client; |
5237 | struct ceph_fs_client *fsc = mdsc->fsc; |
5238 | |
5239 | if (!ceph_test_mount_opt(fsc, CLEANRECOVER)) |
5240 | return; |
5241 | |
5242 | if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED) |
5243 | return; |
5244 | |
5245 | if (!READ_ONCE(fsc->blocklisted)) |
5246 | return; |
5247 | |
pr_info_client(cl, "auto reconnect after blocklisted\n");
ceph_force_reconnect(fsc->sb);
5250 | } |
5251 | |
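/*
* Returns false for sessions in a state that isn't worth renewing or
* keeping alive; delayed_work() below uses this to skip them.
*/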
5252 | bool check_session_state(struct ceph_mds_session *s) |
5253 | { |
5254 | struct ceph_client *cl = s->s_mdsc->fsc->client; |
5255 | |
5256 | switch (s->s_state) { |
5257 | case CEPH_MDS_SESSION_OPEN: |
5258 | if (s->s_ttl && time_after(jiffies, s->s_ttl)) { |
5259 | s->s_state = CEPH_MDS_SESSION_HUNG; |
pr_info_client(cl, "mds%d hung\n", s->s_mds);
5261 | } |
5262 | break; |
5263 | case CEPH_MDS_SESSION_CLOSING: |
5264 | case CEPH_MDS_SESSION_NEW: |
5265 | case CEPH_MDS_SESSION_RESTARTING: |
5266 | case CEPH_MDS_SESSION_CLOSED: |
5267 | case CEPH_MDS_SESSION_REJECTED: |
5268 | return false; |
5269 | } |
5270 | |
5271 | return true; |
5272 | } |
5273 | |
5274 | /* |
5275 | * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply, |
5276 | * then we need to retransmit that request. |
5277 | */ |
5278 | void inc_session_sequence(struct ceph_mds_session *s) |
5279 | { |
5280 | struct ceph_client *cl = s->s_mdsc->fsc->client; |
5281 | |
5282 | lockdep_assert_held(&s->s_mutex); |
5283 | |
5284 | s->s_seq++; |
5285 | |
5286 | if (s->s_state == CEPH_MDS_SESSION_CLOSING) { |
5287 | int ret; |
5288 | |
doutc(cl, "resending session close request for mds%d\n", s->s_mds);
ret = request_close_session(s);
if (ret < 0)
pr_err_client(cl, "unable to close session to mds%d: %d\n",
5293 | s->s_mds, ret); |
5294 | } |
5295 | } |
5296 | |
5297 | /* |
5298 | * delayed work -- periodically trim expired leases, renew caps with mds. If |
5299 | * the @delay parameter is set to 0 or if it's more than 5 secs, the default |
5300 | * workqueue delay value of 5 secs will be used. |
5301 | */ |
5302 | static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay) |
5303 | { |
5304 | unsigned long max_delay = HZ * 5; |
5305 | |
5306 | /* 5 secs default delay */ |
5307 | if (!delay || (delay > max_delay)) |
5308 | delay = max_delay; |
schedule_delayed_work(&mdsc->delayed_work,
round_jiffies_relative(delay));
5311 | } |
5312 | |
5313 | static void delayed_work(struct work_struct *work) |
5314 | { |
5315 | struct ceph_mds_client *mdsc = |
5316 | container_of(work, struct ceph_mds_client, delayed_work.work); |
5317 | unsigned long delay; |
5318 | int renew_interval; |
5319 | int renew_caps; |
5320 | int i; |
5321 | |
doutc(mdsc->fsc->client, "mdsc delayed_work\n");
5323 | |
5324 | if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) |
5325 | return; |
5326 | |
5327 | mutex_lock(&mdsc->mutex); |
5328 | renew_interval = mdsc->mdsmap->m_session_timeout >> 2; |
5329 | renew_caps = time_after_eq(jiffies, HZ*renew_interval + |
5330 | mdsc->last_renew_caps); |
5331 | if (renew_caps) |
5332 | mdsc->last_renew_caps = jiffies; |
5333 | |
5334 | for (i = 0; i < mdsc->max_sessions; i++) { |
struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
5336 | if (!s) |
5337 | continue; |
5338 | |
5339 | if (!check_session_state(s)) { |
5340 | ceph_put_mds_session(s); |
5341 | continue; |
5342 | } |
mutex_unlock(&mdsc->mutex);

mutex_lock(&s->s_mutex);
if (renew_caps)
send_renew_caps(mdsc, s);
else
ceph_con_keepalive(&s->s_con);
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
s->s_state == CEPH_MDS_SESSION_HUNG)
ceph_send_cap_releases(mdsc, s);
mutex_unlock(&s->s_mutex);
5354 | ceph_put_mds_session(s); |
5355 | |
5356 | mutex_lock(&mdsc->mutex); |
5357 | } |
mutex_unlock(&mdsc->mutex);
5359 | |
5360 | delay = ceph_check_delayed_caps(mdsc); |
5361 | |
5362 | ceph_queue_cap_reclaim_work(mdsc); |
5363 | |
5364 | ceph_trim_snapid_map(mdsc); |
5365 | |
5366 | maybe_recover_session(mdsc); |
5367 | |
5368 | schedule_delayed(mdsc, delay); |
5369 | } |
5370 | |
5371 | int ceph_mdsc_init(struct ceph_fs_client *fsc) |
5372 | |
5373 | { |
5374 | struct ceph_mds_client *mdsc; |
5375 | int err; |
5376 | |
mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
5378 | if (!mdsc) |
5379 | return -ENOMEM; |
5380 | mdsc->fsc = fsc; |
5381 | mutex_init(&mdsc->mutex); |
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
5383 | if (!mdsc->mdsmap) { |
5384 | err = -ENOMEM; |
5385 | goto err_mdsc; |
5386 | } |
5387 | |
init_completion(&mdsc->safe_umount_waiters);
spin_lock_init(&mdsc->stopping_lock);
atomic_set(&mdsc->stopping_blockers, 0);
init_completion(&mdsc->stopping_waiter);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->quotarealms_inodes = RB_ROOT;
mutex_init(&mdsc->quotarealms_inodes_mutex);
init_rwsem(&mdsc->snap_rwsem);
mdsc->snap_realms = RB_ROOT;
INIT_LIST_HEAD(&mdsc->snap_empty);
spin_lock_init(&mdsc->snap_empty_lock);
mdsc->request_tree = RB_ROOT;
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
INIT_LIST_HEAD(&mdsc->cap_flush_list);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
err = ceph_metric_init(&mdsc->metric);
5417 | if (err) |
5418 | goto err_mdsmap; |
5419 | |
5420 | spin_lock_init(&mdsc->dentry_list_lock); |
INIT_LIST_HEAD(&mdsc->dentry_leases);
INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
5423 | |
5424 | ceph_caps_init(mdsc); |
ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
5426 | |
5427 | spin_lock_init(&mdsc->snapid_map_lock); |
5428 | mdsc->snapid_map_tree = RB_ROOT; |
INIT_LIST_HEAD(&mdsc->snapid_map_lru);
5430 | |
5431 | init_rwsem(&mdsc->pool_perm_rwsem); |
5432 | mdsc->pool_perm_tree = RB_ROOT; |
5433 | |
5434 | strscpy(mdsc->nodename, utsname()->nodename, |
5435 | sizeof(mdsc->nodename)); |
5436 | |
5437 | fsc->mdsc = mdsc; |
5438 | return 0; |
5439 | |
5440 | err_mdsmap: |
kfree(mdsc->mdsmap);
5442 | err_mdsc: |
kfree(mdsc);
5444 | return err; |
5445 | } |
5446 | |
5447 | /* |
5448 | * Wait for safe replies on open mds requests. If we time out, drop |
5449 | * all requests from the tree to avoid dangling dentry refs. |
5450 | */ |
5451 | static void wait_requests(struct ceph_mds_client *mdsc) |
5452 | { |
5453 | struct ceph_client *cl = mdsc->fsc->client; |
5454 | struct ceph_options *opts = mdsc->fsc->client->options; |
5455 | struct ceph_mds_request *req; |
5456 | |
5457 | mutex_lock(&mdsc->mutex); |
5458 | if (__get_oldest_req(mdsc)) { |
mutex_unlock(&mdsc->mutex);
5460 | |
doutc(cl, "waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
ceph_timeout_jiffies(opts->mount_timeout));
5464 | |
5465 | /* tear down remaining requests */ |
5466 | mutex_lock(&mdsc->mutex); |
5467 | while ((req = __get_oldest_req(mdsc))) { |
doutc(cl, "timed out on tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__unregister_request(mdsc, req);
5471 | } |
5472 | } |
mutex_unlock(&mdsc->mutex);
doutc(cl, "done\n");
5475 | } |
5476 | |
5477 | void send_flush_mdlog(struct ceph_mds_session *s) |
5478 | { |
5479 | struct ceph_client *cl = s->s_mdsc->fsc->client; |
5480 | struct ceph_msg *msg; |
5481 | |
5482 | /* |
5483 | * Pre-luminous MDS crashes when it sees an unknown session request |
5484 | */ |
5485 | if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS)) |
5486 | return; |
5487 | |
5488 | mutex_lock(&s->s_mutex); |
doutc(cl, "request mdlog flush to mds%d (%s) seq %lld\n",
s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
s->s_seq);
if (!msg) {
pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n",
s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
} else {
ceph_con_send(&s->s_con, msg);
5498 | } |
mutex_unlock(&s->s_mutex);
5500 | } |
5501 | |
5502 | /* |
5503 | * called before mount is ro, and before dentries are torn down. |
5504 | * (hmm, does this still race with new lookups?) |
5505 | */ |
5506 | void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) |
5507 | { |
doutc(mdsc->fsc->client, "begin\n");
mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;

ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
5513 | ceph_flush_dirty_caps(mdsc); |
5514 | wait_requests(mdsc); |
5515 | |
5516 | /* |
5517 | * wait for reply handlers to drop their request refs and |
5518 | * their inode/dcache refs |
5519 | */ |
5520 | ceph_msgr_flush(); |
5521 | |
5522 | ceph_cleanup_quotarealms_inodes(mdsc); |
doutc(mdsc->fsc->client, "done\n");
5524 | } |
5525 | |
5526 | /* |
* flush the mdlog and wait for all write (unsafe) MDS requests up to
* want_tid to complete.
5528 | */ |
5529 | static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc, |
5530 | u64 want_tid) |
5531 | { |
5532 | struct ceph_client *cl = mdsc->fsc->client; |
5533 | struct ceph_mds_request *req = NULL, *nextreq; |
5534 | struct ceph_mds_session *last_session = NULL; |
5535 | struct rb_node *n; |
5536 | |
5537 | mutex_lock(&mdsc->mutex); |
doutc(cl, "want %lld\n", want_tid);
5539 | restart: |
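/*
* Both req and nextreq are pinned with a reference before
* mdsc->mutex is dropped; RB_EMPTY_NODE() below detects a nextreq
* that was unregistered while we slept, forcing a restart.
*/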
5540 | req = __get_oldest_req(mdsc); |
5541 | while (req && req->r_tid <= want_tid) { |
5542 | /* find next request */ |
5543 | n = rb_next(&req->r_node); |
5544 | if (n) |
5545 | nextreq = rb_entry(n, struct ceph_mds_request, r_node); |
5546 | else |
5547 | nextreq = NULL; |
5548 | if (req->r_op != CEPH_MDS_OP_SETFILELOCK && |
5549 | (req->r_op & CEPH_MDS_OP_WRITE)) { |
5550 | struct ceph_mds_session *s = req->r_session; |
5551 | |
5552 | if (!s) { |
5553 | req = nextreq; |
5554 | continue; |
5555 | } |
5556 | |
5557 | /* write op */ |
5558 | ceph_mdsc_get_request(req); |
5559 | if (nextreq) |
ceph_mdsc_get_request(nextreq);
s = ceph_get_mds_session(s);
mutex_unlock(&mdsc->mutex);
5563 | |
5564 | /* send flush mdlog request to MDS */ |
5565 | if (last_session != s) { |
5566 | send_flush_mdlog(s); |
ceph_put_mds_session(last_session);
5568 | last_session = s; |
5569 | } else { |
5570 | ceph_put_mds_session(s); |
5571 | } |
doutc(cl, "wait on %llu (want %llu)\n",
5573 | req->r_tid, want_tid); |
5574 | wait_for_completion(&req->r_safe_completion); |
5575 | |
5576 | mutex_lock(&mdsc->mutex); |
5577 | ceph_mdsc_put_request(req); |
5578 | if (!nextreq) |
break; /* no next request, so we're done */
5580 | if (RB_EMPTY_NODE(&nextreq->r_node)) { |
5581 | /* next request was removed from tree */ |
ceph_mdsc_put_request(nextreq);
5583 | goto restart; |
5584 | } |
ceph_mdsc_put_request(nextreq); /* won't go away */
5586 | } |
5587 | req = nextreq; |
5588 | } |
mutex_unlock(&mdsc->mutex);
ceph_put_mds_session(last_session);
doutc(cl, "done\n");
5592 | } |
5593 | |
5594 | void ceph_mdsc_sync(struct ceph_mds_client *mdsc) |
5595 | { |
5596 | struct ceph_client *cl = mdsc->fsc->client; |
5597 | u64 want_tid, want_flush; |
5598 | |
5599 | if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) |
5600 | return; |
5601 | |
doutc(cl, "sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
mutex_unlock(&mdsc->mutex);
5606 | |
5607 | ceph_flush_dirty_caps(mdsc); |
spin_lock(&mdsc->cap_dirty_lock);
5609 | want_flush = mdsc->last_cap_flush_tid; |
if (!list_empty(&mdsc->cap_flush_list)) {
5611 | struct ceph_cap_flush *cf = |
5612 | list_last_entry(&mdsc->cap_flush_list, |
5613 | struct ceph_cap_flush, g_list); |
5614 | cf->wake = true; |
5615 | } |
spin_unlock(&mdsc->cap_dirty_lock);
5617 | |
doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
5619 | |
5620 | flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid); |
wait_caps_flush(mdsc, want_flush);
5622 | } |
5623 | |
5624 | /* |
5625 | * true if all sessions are closed, or we force unmount |
5626 | */ |
5627 | static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) |
5628 | { |
5629 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
5630 | return true; |
return atomic_read(&mdsc->num_sessions) <= skipped;
5632 | } |
5633 | |
5634 | /* |
* called after the sb is ro, or when metadata is corrupted.
5636 | */ |
5637 | void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) |
5638 | { |
5639 | struct ceph_options *opts = mdsc->fsc->client->options; |
5640 | struct ceph_client *cl = mdsc->fsc->client; |
5641 | struct ceph_mds_session *session; |
5642 | int i; |
5643 | int skipped = 0; |
5644 | |
doutc(cl, "begin\n");
5646 | |
5647 | /* close sessions */ |
5648 | mutex_lock(&mdsc->mutex); |
5649 | for (i = 0; i < mdsc->max_sessions; i++) { |
session = __ceph_lookup_mds_session(mdsc, i);
5651 | if (!session) |
5652 | continue; |
mutex_unlock(&mdsc->mutex);
5654 | mutex_lock(&session->s_mutex); |
5655 | if (__close_session(mdsc, session) <= 0) |
5656 | skipped++; |
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
5659 | mutex_lock(&mdsc->mutex); |
5660 | } |
mutex_unlock(&mdsc->mutex);

doutc(cl, "waiting for sessions to close\n");
5664 | wait_event_timeout(mdsc->session_close_wq, |
5665 | done_closing_sessions(mdsc, skipped), |
5666 | ceph_timeout_jiffies(opts->mount_timeout)); |
5667 | |
5668 | /* tear down remaining sessions */ |
5669 | mutex_lock(&mdsc->mutex); |
5670 | for (i = 0; i < mdsc->max_sessions; i++) { |
5671 | if (mdsc->sessions[i]) { |
session = ceph_get_mds_session(mdsc->sessions[i]);
__unregister_session(mdsc, session);
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
remove_session_caps(session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
5679 | mutex_lock(&mdsc->mutex); |
5680 | } |
5681 | } |
5682 | WARN_ON(!list_empty(&mdsc->cap_delay_list)); |
mutex_unlock(&mdsc->mutex);
5684 | |
5685 | ceph_cleanup_snapid_map(mdsc); |
5686 | ceph_cleanup_global_and_empty_realms(mdsc); |
5687 | |
cancel_work_sync(&mdsc->cap_reclaim_work);
cancel_work_sync(&mdsc->cap_unlink_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
5691 | |
doutc(cl, "done\n");
5693 | } |
5694 | |
5695 | void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) |
5696 | { |
5697 | struct ceph_mds_session *session; |
5698 | int mds; |
5699 | |
doutc(mdsc->fsc->client, "force umount\n");
5701 | |
5702 | mutex_lock(&mdsc->mutex); |
5703 | for (mds = 0; mds < mdsc->max_sessions; mds++) { |
5704 | session = __ceph_lookup_mds_session(mdsc, mds); |
5705 | if (!session) |
5706 | continue; |
5707 | |
5708 | if (session->s_state == CEPH_MDS_SESSION_REJECTED) |
__unregister_session(mdsc, session);
__wake_requests(mdsc, &session->s_waiting);
mutex_unlock(&mdsc->mutex);
5712 | |
5713 | mutex_lock(&session->s_mutex); |
5714 | __close_session(mdsc, session); |
5715 | if (session->s_state == CEPH_MDS_SESSION_CLOSING) { |
5716 | cleanup_session_requests(mdsc, session); |
5717 | remove_session_caps(session); |
5718 | } |
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
5721 | |
5722 | mutex_lock(&mdsc->mutex); |
5723 | kick_requests(mdsc, mds); |
5724 | } |
__wake_requests(mdsc, &mdsc->waiting_for_map);
mutex_unlock(&mdsc->mutex);
5727 | } |
5728 | |
5729 | static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
5730 | { |
doutc(mdsc->fsc->client, "stop\n");
5732 | /* |
5733 | * Make sure the delayed work stopped before releasing |
5734 | * the resources. |
5735 | * |
5736 | * Because the cancel_delayed_work_sync() will only |
5737 | * guarantee that the work finishes executing. But the |
5738 | * delayed work will re-arm itself again after that. |
5739 | */ |
flush_delayed_work(&mdsc->delayed_work);
5741 | |
if (mdsc->mdsmap)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
5745 | ceph_caps_finalize(mdsc); |
5746 | ceph_pool_perm_destroy(mdsc); |
5747 | } |
5748 | |
5749 | void ceph_mdsc_destroy(struct ceph_fs_client *fsc) |
5750 | { |
5751 | struct ceph_mds_client *mdsc = fsc->mdsc; |
doutc(fsc->client, "%p\n", mdsc);
5753 | |
5754 | if (!mdsc) |
5755 | return; |
5756 | |
5757 | /* flush out any connection work with references to us */ |
5758 | ceph_msgr_flush(); |
5759 | |
5760 | ceph_mdsc_stop(mdsc); |
5761 | |
ceph_metric_destroy(&mdsc->metric);
5763 | |
5764 | fsc->mdsc = NULL; |
kfree(mdsc);
doutc(fsc->client, "%p done\n", mdsc);
5767 | } |
5768 | |
5769 | void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
5770 | { |
5771 | struct ceph_fs_client *fsc = mdsc->fsc; |
5772 | struct ceph_client *cl = fsc->client; |
5773 | const char *mds_namespace = fsc->mount_options->mds_namespace; |
5774 | void *p = msg->front.iov_base; |
5775 | void *end = p + msg->front.iov_len; |
5776 | u32 epoch; |
5777 | u32 num_fs; |
5778 | u32 mount_fscid = (u32)-1; |
5779 | int err = -EINVAL; |
5780 | |
5781 | ceph_decode_need(&p, end, sizeof(u32), bad); |
epoch = ceph_decode_32(&p);
5783 | |
doutc(cl, "epoch %u\n", epoch);
5785 | |
5786 | /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */ |
5787 | ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad); |
5788 | |
5789 | ceph_decode_32_safe(&p, end, num_fs, bad); |
5790 | while (num_fs-- > 0) { |
5791 | void *info_p, *info_end; |
5792 | u32 info_len; |
5793 | u32 fscid, namelen; |
5794 | |
5795 | ceph_decode_need(&p, end, 2 + sizeof(u32), bad); |
5796 | p += 2; // info_v, info_cv |
info_len = ceph_decode_32(&p);
5798 | ceph_decode_need(&p, end, info_len, bad); |
5799 | info_p = p; |
5800 | info_end = p + info_len; |
5801 | p = info_end; |
5802 | |
5803 | ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad); |
fscid = ceph_decode_32(&info_p);
namelen = ceph_decode_32(&info_p);
5806 | ceph_decode_need(&info_p, info_end, namelen, bad); |
5807 | |
5808 | if (mds_namespace && |
5809 | strlen(mds_namespace) == namelen && |
5810 | !strncmp(mds_namespace, (char *)info_p, namelen)) { |
5811 | mount_fscid = fscid; |
5812 | break; |
5813 | } |
5814 | } |
5815 | |
ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
if (mount_fscid != (u32)-1) {
fsc->client->monc.fs_cluster_id = mount_fscid;
ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
0, true);
ceph_monc_renew_subs(&fsc->client->monc);
5822 | } else { |
5823 | err = -ENOENT; |
5824 | goto err_out; |
5825 | } |
5826 | return; |
5827 | |
5828 | bad: |
pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n",
err);
ceph_umount_begin(mdsc->fsc->sb);
5832 | ceph_msg_dump(msg); |
5833 | err_out: |
5834 | mutex_lock(&mdsc->mutex); |
5835 | mdsc->mdsmap_err = err; |
__wake_requests(mdsc, &mdsc->waiting_for_map);
mutex_unlock(&mdsc->mutex);
5838 | } |
5839 | |
5840 | /* |
5841 | * handle mds map update. |
5842 | */ |
5843 | void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
5844 | { |
5845 | struct ceph_client *cl = mdsc->fsc->client; |
5846 | u32 epoch; |
5847 | u32 maplen; |
5848 | void *p = msg->front.iov_base; |
5849 | void *end = p + msg->front.iov_len; |
5850 | struct ceph_mdsmap *newmap, *oldmap; |
5851 | struct ceph_fsid fsid; |
5852 | int err = -EINVAL; |
5853 | |
5854 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); |
ceph_decode_copy(&p, &fsid, sizeof(fsid));
if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5857 | return; |
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
doutc(cl, "epoch %u len %d\n", epoch, (int)maplen);
5861 | |
5862 | /* do we need it? */ |
5863 | mutex_lock(&mdsc->mutex); |
5864 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { |
doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
5867 | return; |
5868 | } |
5869 | |
newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
5873 | goto bad_unlock; |
5874 | } |
5875 | |
5876 | /* swap into place */ |
5877 | if (mdsc->mdsmap) { |
5878 | oldmap = mdsc->mdsmap; |
5879 | mdsc->mdsmap = newmap; |
5880 | check_new_map(mdsc, newmap, oldmap); |
ceph_mdsmap_destroy(oldmap);
5882 | } else { |
5883 | mdsc->mdsmap = newmap; /* first mds map */ |
5884 | } |
5885 | mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size, |
5886 | MAX_LFS_FILESIZE); |
5887 | |
__wake_requests(mdsc, &mdsc->waiting_for_map);
ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
mdsc->mdsmap->m_epoch);
5891 | |
mutex_unlock(&mdsc->mutex);
schedule_delayed(mdsc, 0);
5894 | return; |
5895 | |
bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
pr_err_client(cl, "error decoding mdsmap %d. Shutting down mount.\n",
err);
ceph_umount_begin(mdsc->fsc->sb);
5902 | ceph_msg_dump(msg); |
5903 | return; |
5904 | } |
5905 | |
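/*
* The messenger takes and drops references on con->private (the
* ceph_mds_session embedding this connection) through the get/put
* ops below, keeping the session alive while messages are in flight.
*/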
5906 | static struct ceph_connection *mds_get_con(struct ceph_connection *con) |
5907 | { |
5908 | struct ceph_mds_session *s = con->private; |
5909 | |
5910 | if (ceph_get_mds_session(s)) |
5911 | return con; |
5912 | return NULL; |
5913 | } |
5914 | |
5915 | static void mds_put_con(struct ceph_connection *con) |
5916 | { |
5917 | struct ceph_mds_session *s = con->private; |
5918 | |
5919 | ceph_put_mds_session(s); |
5920 | } |
5921 | |
5922 | /* |
5923 | * if the client is unresponsive for long enough, the mds will kill |
5924 | * the session entirely. |
5925 | */ |
5926 | static void mds_peer_reset(struct ceph_connection *con) |
5927 | { |
5928 | struct ceph_mds_session *s = con->private; |
5929 | struct ceph_mds_client *mdsc = s->s_mdsc; |
5930 | |
pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
s->s_mds);
if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO &&
ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT)
send_mds_reconnect(mdsc, s);
5936 | } |
5937 | |
5938 | static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg) |
5939 | { |
5940 | struct ceph_mds_session *s = con->private; |
5941 | struct ceph_mds_client *mdsc = s->s_mdsc; |
5942 | struct ceph_client *cl = mdsc->fsc->client; |
5943 | int type = le16_to_cpu(msg->hdr.type); |
5944 | |
mutex_lock(&mdsc->mutex);
if (__verify_registered_session(mdsc, s) < 0) {
mutex_unlock(&mdsc->mutex);
goto out;
}
mutex_unlock(&mdsc->mutex);
5951 | |
switch (type) {
case CEPH_MSG_MDS_MAP:
ceph_mdsc_handle_mdsmap(mdsc, msg);
break;
case CEPH_MSG_FS_MAP_USER:
ceph_mdsc_handle_fsmap(mdsc, msg);
break;
case CEPH_MSG_CLIENT_SESSION:
handle_session(s, msg);
break;
case CEPH_MSG_CLIENT_REPLY:
handle_reply(s, msg);
break;
case CEPH_MSG_CLIENT_REQUEST_FORWARD:
handle_forward(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_CAPS:
ceph_handle_caps(s, msg);
break;
case CEPH_MSG_CLIENT_SNAP:
ceph_handle_snap(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_LEASE:
handle_lease(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_QUOTA:
ceph_handle_quota(mdsc, s, msg);
break;

default:
pr_err_client(cl, "received unknown message type %d %s\n",
type, ceph_msg_type_name(type));
}
5985 | out: |
5986 | ceph_msg_put(msg); |
5987 | } |
5988 | |
5989 | /* |
5990 | * authentication |
5991 | */ |
5992 | |
5993 | /* |
5994 | * Note: returned pointer is the address of a structure that's |
5995 | * managed separately. Caller must *not* attempt to free it. |
5996 | */ |
5997 | static struct ceph_auth_handshake * |
5998 | mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new) |
5999 | { |
6000 | struct ceph_mds_session *s = con->private; |
6001 | struct ceph_mds_client *mdsc = s->s_mdsc; |
6002 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
6003 | struct ceph_auth_handshake *auth = &s->s_auth; |
6004 | int ret; |
6005 | |
6006 | ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS, |
6007 | force_new, proto, NULL, NULL); |
6008 | if (ret) |
return ERR_PTR(ret);
6010 | |
6011 | return auth; |
6012 | } |
6013 | |
6014 | static int mds_add_authorizer_challenge(struct ceph_connection *con, |
6015 | void *challenge_buf, int challenge_buf_len) |
6016 | { |
6017 | struct ceph_mds_session *s = con->private; |
6018 | struct ceph_mds_client *mdsc = s->s_mdsc; |
6019 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
6020 | |
return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
challenge_buf, challenge_buf_len);
6023 | } |
6024 | |
6025 | static int mds_verify_authorizer_reply(struct ceph_connection *con) |
6026 | { |
6027 | struct ceph_mds_session *s = con->private; |
6028 | struct ceph_mds_client *mdsc = s->s_mdsc; |
6029 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
6030 | struct ceph_auth_handshake *auth = &s->s_auth; |
6031 | |
return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
NULL, NULL, NULL, NULL);
6035 | } |
6036 | |
6037 | static int mds_invalidate_authorizer(struct ceph_connection *con) |
6038 | { |
6039 | struct ceph_mds_session *s = con->private; |
6040 | struct ceph_mds_client *mdsc = s->s_mdsc; |
6041 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
6042 | |
6043 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
6044 | |
return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
6046 | } |
6047 | |
6048 | static int mds_get_auth_request(struct ceph_connection *con, |
6049 | void *buf, int *buf_len, |
6050 | void **authorizer, int *authorizer_len) |
6051 | { |
6052 | struct ceph_mds_session *s = con->private; |
6053 | struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth; |
6054 | struct ceph_auth_handshake *auth = &s->s_auth; |
6055 | int ret; |
6056 | |
6057 | ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS, |
6058 | buf, buf_len); |
6059 | if (ret) |
6060 | return ret; |
6061 | |
6062 | *authorizer = auth->authorizer_buf; |
6063 | *authorizer_len = auth->authorizer_buf_len; |
6064 | return 0; |
6065 | } |
6066 | |
6067 | static int mds_handle_auth_reply_more(struct ceph_connection *con, |
6068 | void *reply, int reply_len, |
6069 | void *buf, int *buf_len, |
6070 | void **authorizer, int *authorizer_len) |
6071 | { |
6072 | struct ceph_mds_session *s = con->private; |
6073 | struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth; |
6074 | struct ceph_auth_handshake *auth = &s->s_auth; |
6075 | int ret; |
6076 | |
6077 | ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len, |
6078 | buf, buf_len); |
6079 | if (ret) |
6080 | return ret; |
6081 | |
6082 | *authorizer = auth->authorizer_buf; |
6083 | *authorizer_len = auth->authorizer_buf_len; |
6084 | return 0; |
6085 | } |
6086 | |
6087 | static int mds_handle_auth_done(struct ceph_connection *con, |
6088 | u64 global_id, void *reply, int reply_len, |
6089 | u8 *session_key, int *session_key_len, |
6090 | u8 *con_secret, int *con_secret_len) |
6091 | { |
6092 | struct ceph_mds_session *s = con->private; |
6093 | struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth; |
6094 | struct ceph_auth_handshake *auth = &s->s_auth; |
6095 | |
6096 | return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len, |
6097 | session_key, session_key_len, |
6098 | con_secret, con_secret_len); |
6099 | } |
6100 | |
6101 | static int mds_handle_auth_bad_method(struct ceph_connection *con, |
6102 | int used_proto, int result, |
6103 | const int *allowed_protos, int proto_cnt, |
6104 | const int *allowed_modes, int mode_cnt) |
6105 | { |
6106 | struct ceph_mds_session *s = con->private; |
6107 | struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc; |
6108 | int ret; |
6109 | |
if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
6111 | used_proto, result, |
6112 | allowed_protos, proto_cnt, |
6113 | allowed_modes, mode_cnt)) { |
6114 | ret = ceph_monc_validate_auth(monc); |
6115 | if (ret) |
6116 | return ret; |
6117 | } |
6118 | |
6119 | return -EACCES; |
6120 | } |
6121 | |
6122 | static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, |
6123 | struct ceph_msg_header *hdr, int *skip) |
6124 | { |
6125 | struct ceph_msg *msg; |
6126 | int type = (int) le16_to_cpu(hdr->type); |
6127 | int front_len = (int) le32_to_cpu(hdr->front_len); |
6128 | |
6129 | if (con->in_msg) |
6130 | return con->in_msg; |
6131 | |
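/*
* *skip = 0: we always want the payload; returning NULL below
* signals an allocation failure to the messenger rather than a
* request to skip this message.
*/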
6132 | *skip = 0; |
msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
6134 | if (!msg) { |
pr_err("unable to allocate msg type %d len %d\n",
6136 | type, front_len); |
6137 | return NULL; |
6138 | } |
6139 | |
6140 | return msg; |
6141 | } |
6142 | |
6143 | static int mds_sign_message(struct ceph_msg *msg) |
6144 | { |
6145 | struct ceph_mds_session *s = msg->con->private; |
6146 | struct ceph_auth_handshake *auth = &s->s_auth; |
6147 | |
6148 | return ceph_auth_sign_message(auth, msg); |
6149 | } |
6150 | |
6151 | static int mds_check_message_signature(struct ceph_msg *msg) |
6152 | { |
6153 | struct ceph_mds_session *s = msg->con->private; |
6154 | struct ceph_auth_handshake *auth = &s->s_auth; |
6155 | |
6156 | return ceph_auth_check_message_signature(auth, msg); |
6157 | } |
6158 | |
6159 | static const struct ceph_connection_operations mds_con_ops = { |
6160 | .get = mds_get_con, |
6161 | .put = mds_put_con, |
6162 | .alloc_msg = mds_alloc_msg, |
6163 | .dispatch = mds_dispatch, |
6164 | .peer_reset = mds_peer_reset, |
6165 | .get_authorizer = mds_get_authorizer, |
6166 | .add_authorizer_challenge = mds_add_authorizer_challenge, |
6167 | .verify_authorizer_reply = mds_verify_authorizer_reply, |
6168 | .invalidate_authorizer = mds_invalidate_authorizer, |
6169 | .sign_message = mds_sign_message, |
6170 | .check_message_signature = mds_check_message_signature, |
6171 | .get_auth_request = mds_get_auth_request, |
6172 | .handle_auth_reply_more = mds_handle_auth_reply_more, |
6173 | .handle_auth_done = mds_handle_auth_done, |
6174 | .handle_auth_bad_method = mds_handle_auth_bad_method, |
6175 | }; |
6176 | |
6177 | /* eof */ |
6178 | |