// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/xarray.h>
#include <linux/fscache.h>
#include <linux/netfs.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}
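
/*
 * Illustration (not part of the upstream logic): nfs_append_int() encodes
 * each value as a comma-separated lower-case hex field, with zero collapsed
 * to a bare separator.  For example, starting with an empty key and len = 0:
 *
 *	nfs_append_int(key, &len, 0);		key = ","      (len = 1)
 *	nfs_append_int(key, &len, 0x801);	key = ",,801"  (len = 5)
 *
 * The bound check only refuses further appends once *_len has already
 * exceeded NFS_MAX_KEY_LEN, which appears to be why the key buffer is
 * allocated with extra slack below.
 */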

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try to get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}
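
/*
 * Example key fragment (illustrative only): for an NFSv4.1 client reached
 * over IPv4 (ss_family == AF_INET == 2), the snprintf() above emits
 * something like ",4.1,2", and the switch then appends the socket port and
 * address words in hex via nfs_append_int().  The exact hex digits depend on
 * how the raw sockaddr fields read on the host, so treat this purely as a
 * sketch of the layout.
 */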

/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}
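
/*
 * Illustration (not part of the upstream file): the volume key built above
 * is a comma-separated string of the form
 *
 *	"nfs" <client key> ",<fsid.major>,<fsid.minor>,<sb flags>,<mount flags>"
 *	      ",<rsize>,<wsize>,<acregmin>,<acregmax>,<acdirmin>,<acdirmax>"
 *	      ",<auth flavour>" [ "," <uniquifier> ]
 *
 * with every numeric field in hex (zero fields collapse to a bare comma).
 * A mount that differs in any of these parameters therefore gets its own
 * fscache volume rather than sharing cached data.
 */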

/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	netfs_inode(inode)->cache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	netfs_inode(inode)->cache = fscache_acquire_cookie(
					       nfss->fscache,
					       0,
					       nfsi->fh.data, /* index_key */
					       nfsi->fh.size,
					       &auxdata,      /* aux_data */
					       sizeof(auxdata),
					       i_size_read(inode));

	if (netfs_inode(inode)->cache)
		mapping_set_release_always(inode->i_mapping);
}
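
/*
 * Note (hedged reading of the arguments above): the NFS filehandle is the
 * per-inode index key within the volume, and the auxdata filled in by
 * nfs_fscache_update_auxdata() serves as fscache's coherency blob; if the
 * copy stored with the cache object no longer matches the inode's current
 * attributes, the cached data is expected to be discarded rather than reused.
 */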

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
	netfs_inode(inode)->cache = NULL;
}

/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
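
/*
 * Note (hedged reading of the code above): fscache_use_cookie() pins the
 * cache object for the lifetime of the open file, and a write-open both
 * marks the cookie as about to be modified and invalidates any existing
 * cached data via FSCACHE_INVAL_DIO_WRITE.  That appears to be what
 * enforces the "read-only opens only" policy described in the comment
 * above nfs_fscache_open_file().
 */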

void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	loff_t i_size = i_size_read(inode);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_unuse_cookie(cookie, &auxdata, &i_size);
}
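
/*
 * Note (descriptive, not upstream text): nfs_fscache_open_file() and
 * nfs_fscache_release_file() pair up around the lifetime of an open file.
 * The release side passes freshly updated auxdata and the current i_size to
 * fscache_unuse_cookie() so the cache can record up-to-date coherency data
 * and object size when the last user goes away.
 */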

int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	if (!netfs_inode(folio_inode(folio))->cache)
		return -ENOBUFS;

	return netfs_read_folio(file, folio);
}

int nfs_netfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;

	if (!netfs_inode(inode)->cache)
		return -ENOBUFS;

	netfs_readahead(ractl);
	return 0;
}
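
/*
 * Both helpers return -ENOBUFS when the inode has no cache cookie, which
 * lets the caller fall back to the plain NFS read path.  A sketch of the
 * expected call pattern (hypothetical caller, not copied from read.c):
 *
 *	ret = nfs_netfs_read_folio(file, folio);
 *	if (ret != -ENOBUFS)
 *		return ret;
 *	// no fscache cookie: issue the read through the normal NFS path
 */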

static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);

	return 0;
}

static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
	put_nfs_open_context(rreq->netfs_priv);
}
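
/*
 * Note (descriptive, not upstream text): the open context taken in
 * nfs_netfs_init_request() rides along in rreq->netfs_priv for the whole
 * netfs request, is reused by nfs_netfs_issue_read() to build the page I/O,
 * and is dropped again in nfs_netfs_free_request().
 */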

static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
{
	return fscache_begin_read_operation(&rreq->cache_resources,
					    netfs_i_cookie(netfs_inode(rreq->inode)));
}

static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
	if (!netfs)
		return NULL;
	netfs->sreq = sreq;
	refcount_set(&netfs->refcount, 1);
	return netfs;
}

static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
{
	size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;

	sreq->len = min(sreq->len, rsize);
	return true;
}
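
/*
 * Note (descriptive, not upstream text): clamping each netfs subrequest to
 * the mount's rsize keeps a subrequest from asking for more data than a
 * single NFS READ is allowed to return for this mount.
 */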

static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	int err;
	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	pgoff_t last = ((sreq->start + sreq->len -
			 sreq->transferred - 1) >> PAGE_SHIFT);
	XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */

	xas_lock(&xas);
	xas_for_each(&xas, page, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		xas_pause(&xas);
		xas_unlock(&xas);
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
		xas_lock(&xas);
	}
	xas_unlock(&xas);
out:
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}

void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;

	if (!netfs)
		return;

	nfs_netfs_get(netfs);
}

int nfs_netfs_folio_unlock(struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;

	/*
	 * If fscache is enabled, netfs will unlock pages.
	 */
	if (netfs_inode(inode)->cache)
		return 0;

	return 1;
}

void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;
	struct netfs_io_subrequest *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}
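
/*
 * Note (hedged summary of the refcounting above): nfs_netfs_alloc() starts
 * the nfs_netfs_io_data at a refcount of one, nfs_netfs_initiate_read()
 * takes an extra reference per outstanding pgio header, and both
 * nfs_netfs_read_completion() and nfs_netfs_issue_read() drop theirs with
 * nfs_netfs_put().  The put helper (in fscache.h) is expected to report the
 * accumulated transferred count or error to netfs_subreq_terminated() once
 * the last reference goes away.
 */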

const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.begin_cache_operation	= nfs_netfs_begin_cache_operation,
	.issue_read		= nfs_netfs_issue_read,
	.clamp_length		= nfs_netfs_clamp_length
};
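
/*
 * Note (hedged, describing how the table above is consumed): the ops table
 * is handed to the netfs library when the inode's netfs context is set up
 * (nfs_netfs_inode_init() in fscache.h is assumed to pass &nfs_netfs_ops to
 * netfs_inode_init()), after which netfs_read_folio()/netfs_readahead()
 * call back into the hooks defined in this file.
 */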