// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
				       struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

static unsigned long cachefiles_open;

const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
	{ "restore",	cachefiles_ondemand_restore	},
#endif
	{ "",		NULL				}
};
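
/*
 * Illustrative only (not part of the original source): a typical cachefilesd
 * start-up sequence writes commands from the table above to its open
 * /dev/cachefiles fd, one per write(), for example:
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	bind
 *
 * The paths and percentages here are example values, not defaults taken
 * from this file.
 */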


/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be open once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - limit at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}

static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 *   1) set CACHEFILES_DEAD bit
	 *   2) flush requests in the xarray
	 * Otherwise the request may be enqueued after xarray has been
	 * flushed, leaving the orphan request never being completed.
	 *
	 * CPU 1				CPU 2
	 * =====				=====
	 * flush requests in the xarray
	 *					test CACHEFILES_DEAD bit
	 *					enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
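	/*
	 * Added note (assumption, not in the original source): this barrier
	 * presumably pairs with a matching barrier on the request-enqueue
	 * path, which tests CACHEFILES_DEAD before inserting into
	 * cache->reqs (see fs/cachefiles/ondemand.c).
	 */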
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}

void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}
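
/*
 * Illustrative only (not part of the original source): a read from the open
 * /dev/cachefiles fd returns a single state line built by the snprintf()
 * above, with the limits reported in hex, of the form:
 *
 *	cull=0 frun=<hex> fcull=<hex> fstop=<hex> brun=<hex> bcull=<hex>
 *	bstop=<hex> freleased=<hex> breleased=<hex>
 */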

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}
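
/*
 * Illustrative only (not part of the original source): cachefilesd issues one
 * command per write() on its open /dev/cachefiles fd.  For example, writing
 * "brun 10%\n" is split by the parser above into the command name "brun" and
 * the argument string "10%", which is then passed to cachefiles_daemon_brun()
 * under cache->daemon_mutex.
 */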

/*
 * Poll for culling state
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
				       struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	XA_STATE(xas, &cache->reqs, 0);
	struct cachefiles_req *req;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs)) {
			rcu_read_lock();
			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
				if (!cachefiles_ondemand_is_reopening_read(req)) {
					mask |= EPOLLIN;
					break;
				}
			}
			rcu_read_unlock();
		}
	} else {
		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	char *secctx;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->secctx) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	secctx = kstrdup(args, GFP_KERNEL);
	if (!secctx)
		return -ENOMEM;

	cache->secctx = secctx;
	return 0;
}

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}

/*
 * Request a node in the cache be culled from the current working directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}
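
/*
 * Added note (assumed daemon behaviour, not stated in this file): the object
 * to cull is named relative to the writing process's current working
 * directory, so cachefilesd is expected to fchdir() into the directory that
 * holds the victim before writing "cull <name>"; the same applies to the
 * "inuse <name>" command handled further below.
 */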

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have copies of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}
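
/*
 * Illustrative only (not part of the original source): "bind" finalises the
 * configuration built up by the commands above and brings the cache online
 * via cachefiles_add_cache().  With CONFIG_CACHEFILES_ONDEMAND enabled,
 * "bind ondemand" selects on-demand read mode; otherwise the command takes
 * no argument.
 */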

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->secctx);
	kfree(cache->tag);

	_leave("");
}

Source: linux/fs/cachefiles/daemon.c